diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 11a14256801a..e40b026a1318 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,7 +39,7 @@ repos: name: check-torchdist entry: ./scripts/check-torchdist.py language: python - exclude: ^(deepspeed/comm/|docs/|benchmarks/|scripts/check-torchdist.py|deepspeed/moe/sharded_moe.py|deepspeed/runtime/comm/coalesced_collectives.py|deepspeed/elasticity/elastic_agent.py|deepspeed/launcher/launch.py|tests/unit/comm/test_dist.py) + exclude: ^(deepspeed/comm/|docs/|benchmarks/|scripts/check-torchdist.py|deepspeed/moe/sharded_moe.py|deepspeed/runtime/comm/coalesced_collectives.py|deepspeed/elasticity/elastic_agent.py|deepspeed/launcher/launch.py|tests/unit/comm/test_dist.py|deepspeed/runtime/zero/utils.py|deepspeed/tools/pg_sim/ut/base.py|deepspeed/tools/pg_sim/pg.py|.ci/unit_tests/) # Specific deepspeed/ files are excluded for now until we wrap ProcessGroup in deepspeed.comm - repo: local diff --git a/accelerator/abstract_accelerator.py b/accelerator/abstract_accelerator.py index 3c5d799e293e..2731fdf0cdec 100644 --- a/accelerator/abstract_accelerator.py +++ b/accelerator/abstract_accelerator.py @@ -80,7 +80,7 @@ def manual_seed_all(self, seed): ... @abc.abstractmethod - def initial_seed(self, seed): + def initial_seed(self): ... @abc.abstractmethod @@ -280,6 +280,10 @@ def create_op_builder(self, class_name): def get_op_builder(self, class_name): ... + @abc.abstractmethod + def get_compile_backend(self): + ... + @abc.abstractmethod def build_extension(self): ... diff --git a/accelerator/cpu_accelerator.py b/accelerator/cpu_accelerator.py index b1aba75b4c5e..c4682ac591d2 100644 --- a/accelerator/cpu_accelerator.py +++ b/accelerator/cpu_accelerator.py @@ -85,8 +85,8 @@ def manual_seed(self, seed): def manual_seed_all(self, seed): return torch.manual_seed(seed) - def initial_seed(self, seed): - return torch.initial_seed(seed) + def initial_seed(self): + return torch.initial_seed() def default_generator(self, device_index): return torch.default_generator @@ -302,3 +302,6 @@ def build_extension(self): def export_envs(self): return [] + + def get_compile_backend(self): + return "inductor" diff --git a/accelerator/cuda_accelerator.py b/accelerator/cuda_accelerator.py index 3d5e9c168c16..ef149d59239c 100644 --- a/accelerator/cuda_accelerator.py +++ b/accelerator/cuda_accelerator.py @@ -98,8 +98,8 @@ def manual_seed(self, seed): def manual_seed_all(self, seed): return torch.cuda.manual_seed_all(seed) - def initial_seed(self, seed): - return torch.cuda.initial_seed(seed) + def initial_seed(self): + return torch.cuda.initial_seed() def default_generator(self, device_index): return torch.cuda.default_generators[device_index] @@ -360,3 +360,6 @@ def build_extension(self): def export_envs(self): return ['NCCL'] + + def get_compile_backend(self): + return "inductor" diff --git a/accelerator/hpu_accelerator.py b/accelerator/hpu_accelerator.py index 30b115e8b1ab..99e7b8e22d27 100644 --- a/accelerator/hpu_accelerator.py +++ b/accelerator/hpu_accelerator.py @@ -40,9 +40,8 @@ def handles_memory_backpressure(self): return True def device_name(self, device_index=None): - if device_index is None: - return 'hpu' - return 'hpu:{}'.format(device_index) + # ignoring device_index. 
+ return 'hpu' def device(self, device_index=None): return torch.device(self.device_name(device_index)) @@ -73,13 +72,13 @@ def get_rng_state(self, device_index=None): return self.hpu.random.get_rng_state() def manual_seed(self, seed): - self.hpu.random.manual_seed(seed) + return self.hpu.random.manual_seed(seed) def manual_seed_all(self, seed): self.hpu.random.manual_seed_all(seed) - def initial_seed(self, seed): - self.hpu.random.initial_seed(seed) + def initial_seed(self): + return self.hpu.random.initial_seed() def default_generator(self, device_index): return self.hpu.random.default_generators[device_index] @@ -288,6 +287,17 @@ def get_op_builder(self, class_name): else: return self.class_dict['NotImplementedBuilder'] if 'NotImplementedBuilder' in self.class_dict else None + def get_compile_backend(self): + return "hpu_backend" + + #shall be removed once moving to torch.compile + def wrap_in_hpu_graph(self, module): + if self.hpu.is_lazy(): + module = self.hpu.wrap_in_hpu_graph(module) + else: + print("Warning: hpu graphs in eager mode is not supported, ignoring") + return module + def build_extension(self): from torch.utils.cpp_extension import BuildExtension return BuildExtension diff --git a/accelerator/mps_accelerator.py b/accelerator/mps_accelerator.py index 972b33caece1..c427c9555148 100644 --- a/accelerator/mps_accelerator.py +++ b/accelerator/mps_accelerator.py @@ -76,7 +76,7 @@ def manual_seed_all(self, seed): def seed(self): return torch.mps.seed() - def initial_seed(self, seed): + def initial_seed(self): return def default_generator(self, device_index): @@ -258,3 +258,6 @@ def build_extension(self): def export_envs(self): return [] + + def get_compile_backend(self): + return "inductor" diff --git a/accelerator/npu_accelerator.py b/accelerator/npu_accelerator.py index 472157e32c02..b995500204ea 100644 --- a/accelerator/npu_accelerator.py +++ b/accelerator/npu_accelerator.py @@ -83,8 +83,8 @@ def manual_seed(self, seed): def manual_seed_all(self, seed): return torch.npu.manual_seed_all(seed) - def initial_seed(self, seed): - return torch.npu.initial_seed(seed) + def initial_seed(self): + return torch.npu.initial_seed() def default_generator(self, device_index): return torch.npu.default_generators[device_index] @@ -278,3 +278,6 @@ def build_extension(self): def export_envs(self): return ['ASCEND', 'HCCL', 'LD_LIBRARY', 'PATH'] + + def get_compile_backend(self): + return "inductor" diff --git a/accelerator/xpu_accelerator.py b/accelerator/xpu_accelerator.py index 3f65263946ab..a658bc0c6ada 100644 --- a/accelerator/xpu_accelerator.py +++ b/accelerator/xpu_accelerator.py @@ -72,8 +72,8 @@ def manual_seed(self, seed): def manual_seed_all(self, seed): return torch.xpu.manual_seed_all(seed) - def initial_seed(self, seed): - return torch.xpu.initial_seed(seed) + def initial_seed(self): + return torch.xpu.initial_seed() def default_generator(self, device_index): return torch.xpu.default_generators[device_index] diff --git a/build.txt b/build.txt new file mode 100644 index 000000000000..d983a4989d2e --- /dev/null +++ b/build.txt @@ -0,0 +1 @@ ++hpu.synapse.v1.17.1 diff --git a/csrc/adagrad/cpu_adagrad.cpp b/csrc/adagrad/cpu_adagrad.cpp index 563255176500..5790e79e2bc2 100644 --- a/csrc/adagrad/cpu_adagrad.cpp +++ b/csrc/adagrad/cpu_adagrad.cpp @@ -5,55 +5,38 @@ #include "cpu_adagrad.h" #include +#include #include +#include #include #include #include -#if defined(__ENABLE_CUDA__) -#include -#include "cublas_v2.h" -#include "cuda.h" -#include "curand.h" -#include "custom_cuda_layers.h" 
-#endif +using namespace std::string_literals; static std::unordered_map> s_optimizers; // C++ interface -void Adagrad_Optimizer::Step_1(float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) +template +void Adagrad_Optimizer::Step_1(ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg_sq, + size_t _param_size) { size_t rounded_size = 0; #if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<1>( - &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); + Step_AVX<1>(&rounded_size, _params, grads, _exp_avg_sq, _param_size); #endif if (_param_size > rounded_size) { float step_size = -1 * _alpha; - ds_half_precision_t* grads_cast_h; - ds_half_precision_t* params_cast_h; - if (half_precision) { - grads_cast_h = reinterpret_cast(grads); - params_cast_h = reinterpret_cast(_params); - } for (size_t t = rounded_size; t < _param_size; t += TILE) { size_t copy_size = TILE; if ((t + TILE) > _param_size) copy_size = _param_size - t; size_t offset = copy_size + t; -#if defined(__ENABLE_CUDA__) - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } -#elif defined(__ENABLE_CANN__) - if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } -#endif #pragma omp parallel for for (size_t k = t; k < offset; k++) { - float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; - float param = half_precision ? (float)params_cast_h[k] : _params[k]; + float grad = (float)grads[k]; + float param = (float)_params[k]; float momentum = grads[k]; float variance = _exp_avg_sq[k]; if (_weight_decay > 0) { grad = param * _weight_decay + grad; } @@ -64,58 +47,30 @@ void Adagrad_Optimizer::Step_1(float* _params, grad += _eps; grad = momentum / grad; param = grad * step_size + param; -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - if (dev_params) _doubled_buffer[_buf_index][k - t] = param; -#endif - if (half_precision) - params_cast_h[k] = (ds_half_precision_t)param; - else - _params[k] = param; + _params[k] = param; // STORE UPDATE TERM TO GRAD'S MEMORY grads[k] = grad * step_size; _exp_avg_sq[k] = variance; } -#if defined(__ENABLE_CUDA__) - if (dev_params) { - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); - _buf_index = !_buf_index; - } -#elif defined(__ENABLE_CANN__) - if (dev_params) { - size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); - aclrtMemcpy(dev_params + t, - memcpy_size, - _doubled_buffer[_buf_index], - memcpy_size, - aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); - - _buf_index = !_buf_index; - } -#endif } } } -void Adagrad_Optimizer::Step_4(float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) +template +void Adagrad_Optimizer::Step_4(ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg_sq, + size_t _param_size) { size_t rounded_size = 0; #if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<4>( - &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); + Step_AVX<4>(&rounded_size, _params, grads, _exp_avg_sq, _param_size); #endif if (_param_size > rounded_size) Step_1((_params + rounded_size), (grads + rounded_size), (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); + (_param_size - rounded_size)); } int create_adagrad_optimizer(int optimizer_id, @@ -149,25 +104,77 @@ int create_adagrad_optimizer(int optimizer_id, return 0; } -void Adagrad_Optimizer::Step_8(float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) +template +void Adagrad_Optimizer::Step_8(ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg_sq, + size_t _param_size) { size_t rounded_size = 0; #if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<8>( - &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); + Step_AVX<8>(&rounded_size, _params, grads, _exp_avg_sq, _param_size); #endif if (_param_size > rounded_size) Step_4((_params + rounded_size), (grads + rounded_size), (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? (dev_params + rounded_size) : dev_params), - half_precision); + (_param_size - rounded_size)); +} + +template +void step_invoker(std::shared_ptr opt, + void* _params, + void* grads, + void* _exp_avg_sq, + size_t _param_size) +{ + opt->Step_8((ds_params_percision_t*)(_params), + (ds_params_percision_t*)(grads), + (ds_state_precision_t*)(_exp_avg_sq), + _param_size); +} + +std::map, + std::function, void*, void*, void*, size_t)>> + invokers; + +// Fill map with template functions for each type +template +void create_invoker() +{ + invokers[std::tuple(c10::CppTypeToScalarType(), + c10::CppTypeToScalarType())] = + step_invoker; +} +struct InvokerInitializer { + InvokerInitializer() + { + create_invoker(); + create_invoker(); + create_invoker(); + create_invoker(); + create_invoker(); + } +} _invoker_initializer; + +void invoke(std::shared_ptr opt, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg_sq, + size_t param_size) +{ + c10::ScalarType params_type = at::typeMetaToScalarType(params.options().dtype()); + c10::ScalarType state_type = at::typeMetaToScalarType(exp_avg_sq.options().dtype()); + + auto it = invokers.find(std::tuple(params_type, state_type)); + if (it == invokers.end()) { + throw std::runtime_error("Adagrad optimizer with param type "s + + c10::toString(params_type) + " and state type "s + + c10::toString(state_type) + + " is not supported on current hardware"s); + } + + it->second(opt, params.data_ptr(), grads.data_ptr(), exp_avg_sq.data_ptr(), param_size); } int ds_adagrad_step(int optimizer_id, @@ -183,58 +190,13 @@ int ds_adagrad_step(int optimizer_id, auto grads_c = grads.contiguous(); auto exp_avg_sq_c = exp_avg_sq.contiguous(); - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - std::shared_ptr opt = std::static_pointer_cast(s_optimizers[optimizer_id]); opt->IncrementStep(step); opt->update_state(lr, epsilon, weight_decay); - opt->Step_8(params_ptr, grads_ptr, exp_avg_sq_ptr, params_c.numel()); -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - opt->SynchronizeStreams(); -#endif - return 0; -} + invoke(opt, params_c, grads_c, exp_avg_sq_c, params_c.numel()); -int ds_adagrad_step_plus_copy(int optimizer_id, - size_t step, - float lr, - float epsilon, - float weight_decay, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg_sq, - torch::Tensor& gpu_params) -{ -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - auto params_c = 
params.contiguous(); - auto gpu_params_c = gpu_params.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - auto grads_c = grads.contiguous(); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - ds_half_precision_t* gpu_params_ptr = (ds_half_precision_t*)gpu_params_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step); - opt->update_state(lr, epsilon, weight_decay); - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_sq_ptr, - params_c.numel(), - gpu_params_ptr, - (params.options().dtype() == at::kHalf)); - - opt->SynchronizeStreams(); -#else - assert(false); -#endif return 0; } @@ -248,9 +210,6 @@ int destroy_adagrad_optimizer(int optimizer_id) PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("adagrad_update", &ds_adagrad_step, "DeepSpeed CPU Adagrad update (C++)"); - m.def("adagrad_update_copy", - &ds_adagrad_step_plus_copy, - "DeepSpeed CPU Adagrad update and param copy (C++)"); m.def("create_adagrad", &create_adagrad_optimizer, "DeepSpeed CPU Adagrad (C++)"); m.def("destroy_adagrad", &destroy_adagrad_optimizer, "DeepSpeed CPU Adagrad destroy (C++)"); } diff --git a/csrc/adam/cpu_adam.cpp b/csrc/adam/cpu_adam.cpp index 96809827f3e1..263c443cb4d4 100644 --- a/csrc/adam/cpu_adam.cpp +++ b/csrc/adam/cpu_adam.cpp @@ -8,9 +8,6 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("adam_update", &ds_adam_step, "DeepSpeed CPU Adam update (C++)"); - m.def("adam_update_copy", - &ds_adam_step_plus_copy, - "DeepSpeed CPU Adam update and param copy (C++)"); m.def("create_adam", &create_adam_optimizer, "DeepSpeed CPU Adam (C++)"); m.def("destroy_adam", &destroy_adam_optimizer, "DeepSpeed CPU Adam destroy (C++)"); } diff --git a/csrc/adam/cpu_adam_impl.cpp b/csrc/adam/cpu_adam_impl.cpp index 9a4a8d956519..15d4e74d69d5 100644 --- a/csrc/adam/cpu_adam_impl.cpp +++ b/csrc/adam/cpu_adam_impl.cpp @@ -5,42 +5,29 @@ #include #include +#include #include +#include #include #include #include #include "cpu_adam.h" -#if defined(__ENABLE_CUDA__) -#include -#include "cublas_v2.h" -#include "cuda.h" -#include "curand.h" -#include "custom_cuda_layers.h" -#endif - +using namespace std::string_literals; static std::unordered_map> s_optimizers; // C++ interface -void Adam_Optimizer::Step_1(float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) +template +void Adam_Optimizer::Step_1(ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + ds_state_precision_t* _exp_avg_sq, + size_t _param_size) { size_t rounded_size = 0; #if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<1>(&rounded_size, - _params, - grads, - _exp_avg, - _exp_avg_sq, - _param_size, - dev_params, - half_precision); + Step_AVX<1>(&rounded_size, _params, grads, _exp_avg, _exp_avg_sq, _param_size); #endif if (_param_size > rounded_size) { float betta1_minus1 = 1 - _betta1; @@ -48,26 +35,15 @@ void Adam_Optimizer::Step_1(float* _params, float step_size = -1 * _alpha / _bias_correction1; float w_decay = -1 * _alpha * _weight_decay; - ds_half_precision_t* grads_cast_h; - ds_half_precision_t* params_cast_h; - if (half_precision) { - grads_cast_h = reinterpret_cast(grads); - params_cast_h = reinterpret_cast(_params); - } for (size_t t = rounded_size; t < _param_size; t += TILE) { size_t copy_size = TILE; if ((t + TILE) > _param_size) 
copy_size = _param_size - t; size_t offset = copy_size + t; -#if defined(__ENABLE_CUDA__) - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } -#elif defined(__ENABLE_CANN__) - if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } -#endif #pragma omp parallel for for (size_t k = t; k < offset; k++) { - float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; - float param = half_precision ? (float)params_cast_h[k] : _params[k]; + float grad = (float)grads[k]; + float param = (float)_params[k]; float momentum = _exp_avg[k]; float variance = _exp_avg_sq[k]; if (_weight_decay > 0 && !_adamw_mode) { grad = param * _weight_decay + grad; } @@ -83,66 +59,31 @@ void Adam_Optimizer::Step_1(float* _params, grad = momentum / grad; if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; } param = grad * step_size + param; -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - if (dev_params) _doubled_buffer[_buf_index][k - t] = param; -#endif - if (half_precision) - params_cast_h[k] = (ds_half_precision_t)param; - else - _params[k] = param; + _params[k] = param; _exp_avg[k] = momentum; _exp_avg_sq[k] = variance; } -#if defined(__ENABLE_CUDA__) - if (dev_params) { - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); - - _buf_index = !_buf_index; - } -#elif defined(__ENABLE_CANN__) - if (dev_params) { - size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); - aclrtMemcpy(dev_params + t, - memcpy_size, - _doubled_buffer[_buf_index], - memcpy_size, - aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); - - _buf_index = !_buf_index; - } -#endif } } } -void Adam_Optimizer::Step_4(float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) +template +void Adam_Optimizer::Step_4(ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + ds_state_precision_t* _exp_avg_sq, + size_t _param_size) { size_t rounded_size = 0; #if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<4>(&rounded_size, - _params, - grads, - _exp_avg, - _exp_avg_sq, - _param_size, - dev_params, - half_precision); + Step_AVX<4>(&rounded_size, _params, grads, _exp_avg, _exp_avg_sq, _param_size); #endif if (_param_size > rounded_size) Step_1((_params + rounded_size), (grads + rounded_size), (_exp_avg + rounded_size), (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); + (_param_size - rounded_size)); } int create_adam_optimizer(int optimizer_id, @@ -185,33 +126,86 @@ int create_adam_optimizer(int optimizer_id, return 0; } -void Adam_Optimizer::Step_8(float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) +template +void Adam_Optimizer::Step_8(ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + ds_state_precision_t* _exp_avg_sq, + size_t _param_size) { size_t rounded_size = 0; #if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<8>(&rounded_size, - _params, - grads, - _exp_avg, - _exp_avg_sq, - _param_size, - dev_params, - half_precision); + Step_AVX<8>(&rounded_size, _params, grads, _exp_avg, _exp_avg_sq, _param_size); #endif if (_param_size > rounded_size) Step_4((_params + rounded_size), (grads + rounded_size), (_exp_avg + rounded_size), (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? (dev_params + rounded_size) : dev_params), - half_precision); + (_param_size - rounded_size)); +} + +template +void step_invoker(std::shared_ptr opt, + void* _params, + void* grads, + void* _exp_avg, + void* _exp_avg_sq, + size_t _param_size) +{ + opt->Step_8((ds_params_percision_t*)(_params), + (ds_params_percision_t*)(grads), + (ds_state_precision_t*)(_exp_avg), + (ds_state_precision_t*)(_exp_avg_sq), + _param_size); +} + +std::map, + std::function, void*, void*, void*, void*, size_t)>> + invokers; + +// Fill map with template functions for each type +template +void create_invoker() +{ + invokers[std::tuple(c10::CppTypeToScalarType(), + c10::CppTypeToScalarType())] = + step_invoker; +} +struct InvokerInitializer { + InvokerInitializer() + { + create_invoker(); + create_invoker(); + create_invoker(); + create_invoker(); + create_invoker(); + } +} _invoker_initializer; + +void invoke(std::shared_ptr opt, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg, + torch::Tensor& exp_avg_sq, + size_t param_size) +{ + c10::ScalarType params_type = at::typeMetaToScalarType(params.options().dtype()); + c10::ScalarType state_type = at::typeMetaToScalarType(exp_avg.options().dtype()); + + auto it = invokers.find(std::tuple(params_type, state_type)); + if (it == invokers.end()) { + throw std::runtime_error("Adam optimizer with param type "s + c10::toString(params_type) + + " and state type "s + c10::toString(state_type) + + " is not supported on current hardware"s); + } + + it->second(opt, + params.data_ptr(), + grads.data_ptr(), + exp_avg.data_ptr(), + exp_avg_sq.data_ptr(), + param_size); } int ds_adam_step(int optimizer_id, @@ -232,75 +226,13 @@ int ds_adam_step(int optimizer_id, auto exp_avg_c = exp_avg.contiguous(); auto exp_avg_sq_c = exp_avg_sq.contiguous(); - // assert(params.options().dtype() == grads.options().dtype()); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - std::shared_ptr opt = std::static_pointer_cast(s_optimizers[optimizer_id]); opt->IncrementStep(step, beta1, beta2); opt->update_state(lr, epsilon, weight_decay, bias_correction); - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_ptr, - exp_avg_sq_ptr, - params_c.numel(), - nullptr, - (params.options().dtype() == at::kHalf)); + invoke(opt, params_c, grads_c, exp_avg_c, 
exp_avg_sq_c, params_c.numel()); -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - opt->SynchronizeStreams(); -#endif - return 0; -} - -int ds_adam_step_plus_copy(int optimizer_id, - size_t step, - float lr, - float beta1, - float beta2, - float epsilon, - float weight_decay, - bool bias_correction, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg, - torch::Tensor& exp_avg_sq, - torch::Tensor& device_params) -{ -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - auto params_c = params.contiguous(); - auto device_params_c = device_params.contiguous(); - auto exp_avg_c = exp_avg.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - auto grads_c = grads.contiguous(); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - ds_half_precision_t* device_params_ptr = (ds_half_precision_t*)device_params_c.data_ptr(); - float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step, beta1, beta2); - opt->update_state(lr, epsilon, weight_decay, bias_correction); - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_ptr, - exp_avg_sq_ptr, - params_c.numel(), - device_params_ptr, - (params.options().dtype() == at::kHalf)); - - opt->SynchronizeStreams(); -#else - assert(false); -#endif return 0; } diff --git a/csrc/common/custom_cuda_kernel.cu b/csrc/common/custom_cuda_kernel.cu deleted file mode 100644 index f46bf303125c..000000000000 --- a/csrc/common/custom_cuda_kernel.cu +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// SPDX-License-Identifier: Apache-2.0 - -// DeepSpeed Team - -#include "custom_cuda_layers.h" - -__global__ void param_update_kernel(const float* input, __half* output, int size) -{ - int id = blockIdx.x * blockDim.x + threadIdx.x; - - if (id < size) { output[id] = (__half)input[id]; } -} - -void launch_param_update(const float* input, __half* output, int size, cudaStream_t stream) -{ - int threads = 1024; - - dim3 grid_dim((size - 1) / threads + 1); - dim3 block_dim(threads); - - param_update_kernel<<>>(input, output, size); -} - -__global__ void param_update_kernel_half(const float* input, __half* output, int size) -{ - int id = blockIdx.x * blockDim.x + threadIdx.x; - __half2* output_cast = reinterpret_cast<__half2*>(output); - if (id < size) { - float input_f = input[id]; - __half2* input_h = reinterpret_cast<__half2*>(&input_f); - output_cast[id] = *input_h; - } -} - -void launch_param_update_half(const float* input, __half* output, int size, cudaStream_t stream) -{ - int threads = 1024; - size /= 2; - dim3 grid_dim((size - 1) / threads + 1); - dim3 block_dim(threads); - - param_update_kernel_half<<>>(input, output, size); -} diff --git a/csrc/includes/cpu_adagrad.h b/csrc/includes/cpu_adagrad.h index e60984d64b76..c06d3a6b35e9 100644 --- a/csrc/includes/cpu_adagrad.h +++ b/csrc/includes/cpu_adagrad.h @@ -9,84 +9,35 @@ // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c #include +#include #include #include "simd.h" -#if defined(__ENABLE_CUDA__) -#include -#include -#include "cuda.h" -#include "custom_cuda_layers.h" -typedef __half ds_half_precision_t; -#elif defined(__ENABLE_CANN__) -#include "acl/acl.h" -#include "torch_npu/csrc/core/npu/NPUStream.h" -typedef c10::Half ds_half_precision_t; -#else -typedef unsigned short ds_half_precision_t; -#endif - -#define STEP(SPAN) 
\ - void Step_##SPAN(float* _params, \ - float* grads, \ - float* _exp_avg_sq, \ - size_t _param_size, \ - ds_half_precision_t* dev_param = nullptr, \ - bool half_precision = false); +#define STEP(SPAN) \ + template \ + void Step_##SPAN(ds_params_percision_t* _params, \ + ds_params_percision_t* grads, \ + ds_state_precision_t* _exp_avg_sq, \ + size_t _param_size); class Adagrad_Optimizer { public: Adagrad_Optimizer(float alpha = 1e-2, float eps = 1e-8, float weight_decay = 0) : _alpha(alpha), _eps(eps), _weight_decay(weight_decay) { -#if defined(__ENABLE_CUDA__) - cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); - cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _streams[0] = TrainingContext::Instance().GetCurrentStream(); - _streams[1] = TrainingContext::Instance().GetNewStream(); - _buf_index = false; -#elif defined(__ENABLE_CANN__) - aclrtMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); - aclrtMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _buf_index = false; -#endif - } - ~Adagrad_Optimizer() - { -#if defined(__ENABLE_CUDA__) - cudaFreeHost(_doubled_buffer[0]); - cudaFreeHost(_doubled_buffer[1]); -#elif defined(__ENABLE_CANN__) - aclrtFreeHost(_doubled_buffer[0]); - aclrtFreeHost(_doubled_buffer[1]); -#endif } + ~Adagrad_Optimizer() {} #if defined(__AVX512__) or defined(__AVX256__) - template + template void Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg_sq, - size_t param_size, - ds_half_precision_t* dev_param = nullptr, - bool half_precision = false); + ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg_sq, + size_t param_size); #endif STEP(1) STEP(4) STEP(8) -#if defined(__ENABLE_CUDA__) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]); - } -#elif defined(__ENABLE_CANN__) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) aclrtSynchronizeStream(_streams[i].stream()); - } -#endif inline void IncrementStep(size_t step) { _step++; @@ -107,29 +58,22 @@ class Adagrad_Optimizer { float _betta1_t; float _betta2_t; size_t _step; - -#if defined(__ENABLE_CUDA__) - bool _buf_index; - float* _doubled_buffer[2]; - cudaStream_t _streams[2]; -#elif defined(__ENABLE_CANN__) - float* _doubled_buffer[2]; - c10_npu::NPUStream _streams[2] = {c10_npu::getCurrentNPUStream(), - c10_npu::getNPUStreamFromPool()}; - bool _buf_index; -#endif }; #if defined(__AVX512__) or defined(__AVX256__) -template +template void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) + ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg_sq, + size_t _param_size) { +#if !defined(__AVX512__) + if (std::is_same_v || + std::is_same_v) { + return; + } +#endif size_t new_rounded_size = 0; AVX_Data eps_4; eps_4.data = SIMD_SET(_eps); @@ -145,24 +89,19 @@ void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, size_t copy_size = TILE; if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; size_t offset = copy_size + t; -#if defined(__ENABLE_CUDA__) - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } -#elif defined(__ENABLE_CANN__) - if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } -#endif #pragma omp parallel for for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { 
AVX_Data grad_4[span]; - simd_load(grad_4, grads + i, half_precision); + simd_load(grad_4, grads + i); AVX_Data momentum_4[span]; - simd_load(momentum_4, grads + i, false); + simd_load(momentum_4, grads + i); AVX_Data variance_4[span]; - simd_load(variance_4, _exp_avg_sq + i, false); + simd_load(variance_4, _exp_avg_sq + i); AVX_Data param_4[span]; - simd_load(param_4, _params + i, half_precision); + simd_load(param_4, _params + i); if (_weight_decay > 0) { simd_fma(grad_4, param_4, weight_decay4, grad_4); } @@ -172,38 +111,9 @@ void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, simd_div(grad_4, momentum_4, grad_4); simd_fma(param_4, grad_4, step_size_4, param_4); - simd_store(_params + i, param_4, half_precision); -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - if (dev_params) { - simd_store(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); - } -#endif - simd_store(_exp_avg_sq + i, variance_4, false); - } -#if defined(__ENABLE_CUDA__) - if (dev_params) { - if (half_precision) - launch_param_update_half( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - else - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - - _buf_index = !_buf_index; + simd_store(_params + i, param_4); + simd_store(_exp_avg_sq + i, variance_4); } -#elif defined(__ENABLE_CANN__) - if (dev_params) { - size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); - if (half_precision) memcpy_size /= 2; - aclrtMemcpy(dev_params + t, - memcpy_size, - _doubled_buffer[_buf_index], - memcpy_size, - aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); - - _buf_index = !_buf_index; - } -#endif } *rounded_size = new_rounded_size; } diff --git a/csrc/includes/cpu_adam.h b/csrc/includes/cpu_adam.h index b1a104b2571d..faf99020aee5 100644 --- a/csrc/includes/cpu_adam.h +++ b/csrc/includes/cpu_adam.h @@ -13,29 +13,13 @@ #include #include "simd.h" -#if defined(__ENABLE_CUDA__) -#include -#include -#include "cuda.h" -#include "custom_cuda_layers.h" -typedef __half ds_half_precision_t; -#elif defined(__ENABLE_CANN__) -#include "acl/acl.h" -#include "torch_npu/csrc/core/npu/NPUStream.h" -typedef c10::Half ds_half_precision_t; -#else -#include -typedef unsigned short ds_half_precision_t; -#endif - -#define STEP(SPAN) \ - void Step_##SPAN(float* _params, \ - float* grads, \ - float* _exp_avg, \ - float* _exp_avg_sq, \ - size_t _param_size, \ - ds_half_precision_t* dev_param = nullptr, \ - bool half_precision = false); +#define STEP(SPAN) \ + template \ + void Step_##SPAN(ds_params_percision_t* _params, \ + ds_params_percision_t* grads, \ + ds_state_precision_t* _exp_avg, \ + ds_state_precision_t* _exp_avg_sq, \ + size_t _param_size); class Adam_Optimizer { public: @@ -55,56 +39,21 @@ class Adam_Optimizer { _step(0), _adamw_mode(adamw_mode) { -#if defined(__ENABLE_CUDA__) - cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); - cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _streams[0] = TrainingContext::Instance().GetCurrentStream(); - _streams[1] = TrainingContext::Instance().GetNewStream(); - _buf_index = false; -#elif defined(__ENABLE_CANN__) - aclrtMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); - aclrtMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _buf_index = false; -#endif - } - ~Adam_Optimizer() - { -#if defined(__ENABLE_CUDA__) - cudaFreeHost(_doubled_buffer[0]); - cudaFreeHost(_doubled_buffer[1]); -#elif defined(__ENABLE_CANN__) - 
aclrtFreeHost(_doubled_buffer[0]); - aclrtFreeHost(_doubled_buffer[1]); -#endif } + ~Adam_Optimizer() {} #if defined(__AVX512__) or defined(__AVX256__) - template + template void Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t param_size, - ds_half_precision_t* dev_param = nullptr, - bool half_precision = false); + ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + ds_state_precision_t* _exp_avg_sq, + size_t param_size); #endif STEP(1) STEP(4) STEP(8) -#if defined(__ENABLE_CUDA__) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]); - } -#elif defined(__ENABLE_CANN__) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) aclrtSynchronizeStream(_streams[i].stream()); - } -#endif inline void IncrementStep(size_t step, float beta1, float beta2) { if (beta1 != _betta1 || beta2 != _betta2) { @@ -154,32 +103,24 @@ class Adam_Optimizer { float _bias_correction2; bool _adamw_mode; - -#if defined(__ENABLE_CUDA__) - float* _doubled_buffer[2]; - cudaStream_t _streams[2]; - bool _buf_index; -#elif defined(__ENABLE_CANN__) - float* _doubled_buffer[2]; - c10_npu::NPUStream _streams[2] = {c10_npu::getCurrentNPUStream(), - c10_npu::getNPUStreamFromPool()}; - bool _buf_index; -#endif }; #if defined(__AVX512__) or defined(__AVX256__) -template +template void Adam_Optimizer::Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) + ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + ds_state_precision_t* _exp_avg_sq, + size_t _param_size) { +#if !defined(__AVX512__) + if (std::is_same_v || + std::is_same_v) { + return; + } +#endif size_t new_rounded_size = 0; - int rshft = half_precision ? 
1 : 0; AVX_Data betta1_4; betta1_4.data = SIMD_SET(_betta1); @@ -212,24 +153,19 @@ void Adam_Optimizer::Step_AVX(size_t* rounded_size, size_t copy_size = TILE; if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; size_t offset = copy_size + t; -#if defined(__ENABLE_CUDA__) - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } -#elif defined(__ENABLE_CANN__) - if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } -#endif #pragma omp parallel for for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { AVX_Data grad_4[span]; - simd_load(grad_4, grads + (i >> rshft), half_precision); + simd_load(grad_4, grads + i); AVX_Data momentum_4[span]; - simd_load(momentum_4, _exp_avg + i, false); + simd_load(momentum_4, _exp_avg + i); AVX_Data variance_4[span]; - simd_load(variance_4, _exp_avg_sq + i, false); + simd_load(variance_4, _exp_avg_sq + i); AVX_Data param_4[span]; - simd_load(param_4, _params + (i >> rshft), half_precision); + simd_load(param_4, _params + i); if (_weight_decay > 0 && !_adamw_mode) { simd_fma(grad_4, param_4, weight_decay4, grad_4); @@ -250,39 +186,10 @@ void Adam_Optimizer::Step_AVX(size_t* rounded_size, simd_fma(param_4, grad_4, step_size_4, param_4); - simd_store(_params + (i >> rshft), param_4, half_precision); -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - if (dev_params) { - simd_store(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); - } -#endif - simd_store(_exp_avg + i, momentum_4, false); - simd_store(_exp_avg_sq + i, variance_4, false); + simd_store(_params + i, param_4); + simd_store(_exp_avg + i, momentum_4); + simd_store(_exp_avg_sq + i, variance_4); } -#if defined(__ENABLE_CUDA__) - if (dev_params) { - if (half_precision) - launch_param_update_half( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - else - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - - _buf_index = !_buf_index; - } -#elif defined(__ENABLE_CANN__) - if (dev_params) { - size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); - if (half_precision) memcpy_size /= 2; - aclrtMemcpy(dev_params + t, - memcpy_size, - _doubled_buffer[_buf_index], - memcpy_size, - aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); - - _buf_index = !_buf_index; - } -#endif } *rounded_size = new_rounded_size; } @@ -310,18 +217,4 @@ int ds_adam_step(int optimizer_id, torch::Tensor& exp_avg, torch::Tensor& exp_avg_sq); -int ds_adam_step_plus_copy(int optimizer_id, - size_t step, - float lr, - float beta1, - float beta2, - float epsilon, - float weight_decay, - bool bias_correction, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg, - torch::Tensor& exp_avg_sq, - torch::Tensor& gpu_params); - int destroy_adam_optimizer(int optimizer_id); diff --git a/csrc/includes/cpu_lion.h b/csrc/includes/cpu_lion.h index 34c29eec47db..62b304923222 100644 --- a/csrc/includes/cpu_lion.h +++ b/csrc/includes/cpu_lion.h @@ -13,28 +13,12 @@ #include #include "simd.h" -#if defined(__ENABLE_CUDA__) -#include -#include -#include "cuda.h" -#include "custom_cuda_layers.h" -typedef __half ds_half_precision_t; -#elif defined(__ENABLE_CANN__) -#include "acl/acl.h" -#include "torch_npu/csrc/core/npu/NPUStream.h" -typedef c10::Half ds_half_precision_t; -#else -#include -typedef unsigned short ds_half_precision_t; -#endif - -#define STEP(SPAN) \ - void Step_##SPAN(float* _params, \ - float* grads, \ - float* _exp_avg, \ - size_t _param_size, \ - 
ds_half_precision_t* dev_param = nullptr, \ - bool half_precision = false); +#define STEP(SPAN) \ + template \ + void Step_##SPAN(ds_params_percision_t* _params, \ + ds_params_percision_t* grads, \ + ds_state_precision_t* _exp_avg, \ + size_t _param_size); class Lion_Optimizer { public: @@ -44,55 +28,21 @@ class Lion_Optimizer { float weight_decay = 0) : _alpha(alpha), _betta1(betta1), _betta2(betta2), _weight_decay(weight_decay), _step(0) { -#if defined(__ENABLE_CUDA__) - cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); - cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _streams[0] = TrainingContext::Instance().GetCurrentStream(); - _streams[1] = TrainingContext::Instance().GetNewStream(); - _buf_index = false; -#elif defined(__ENABLE_CANN__) - aclrtMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); - aclrtMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _buf_index = false; -#endif - } - ~Lion_Optimizer() - { -#if defined(__ENABLE_CUDA__) - cudaFreeHost(_doubled_buffer[0]); - cudaFreeHost(_doubled_buffer[1]); -#elif defined(__ENABLE_CANN__) - aclrtFreeHost(_doubled_buffer[0]); - aclrtFreeHost(_doubled_buffer[1]); -#endif } + ~Lion_Optimizer() {} #if defined(__AVX512__) or defined(__AVX256__) - template + template void Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg, - size_t param_size, - ds_half_precision_t* dev_param = nullptr, - bool half_precision = false); + ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + size_t param_size); #endif STEP(1) STEP(4) STEP(8) -#if defined(__ENABLE_CUDA__) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]); - } -#elif defined(__ENABLE_CANN__) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) aclrtSynchronizeStream(_streams[i].stream()); - } -#endif + inline void IncrementStep(size_t step, float beta1, float beta2) { _step++; @@ -114,31 +64,23 @@ class Lion_Optimizer { float _betta2; float _weight_decay; size_t _step; - -#if defined(__ENABLE_CUDA__) - float* _doubled_buffer[2]; - cudaStream_t _streams[2]; - bool _buf_index; -#elif defined(__ENABLE_CANN__) - float* _doubled_buffer[2]; - c10_npu::NPUStream _streams[2] = {c10_npu::getCurrentNPUStream(), - c10_npu::getNPUStreamFromPool()}; - bool _buf_index; -#endif }; #if defined(__AVX512__) or defined(__AVX256__) -template +template void Lion_Optimizer::Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) + ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + size_t _param_size) { +#if !defined(__AVX512__) + if (std::is_same_v || + std::is_same_v) { + return; + } +#endif size_t new_rounded_size = 0; - int rshft = half_precision ? 
1 : 0; constexpr float neg1 = -1.0f; AVX_Data neg1_4; @@ -169,21 +111,17 @@ void Lion_Optimizer::Step_AVX(size_t* rounded_size, size_t copy_size = TILE; if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; size_t offset = copy_size + t; -#if defined(__ENABLE_CUDA__) - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } -#elif defined(__ENABLE_CANN__) - if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } -#endif + #pragma omp parallel for for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { AVX_Data grad_4[span]; - simd_load(grad_4, grads + (i >> rshft), half_precision); + simd_load(grad_4, grads + i); AVX_Data momentum_4[span]; - simd_load(momentum_4, _exp_avg + i, false); + simd_load(momentum_4, _exp_avg + i); AVX_Data param_4[span]; - simd_load(param_4, _params + (i >> rshft), half_precision); + simd_load(param_4, _params + i); AVX_Data tmp_4[span]; @@ -201,38 +139,9 @@ void Lion_Optimizer::Step_AVX(size_t* rounded_size, simd_mul(momentum_4, momentum_4, betta2_4); simd_fma(momentum_4, grad_4, betta2_minus1_4, momentum_4); - simd_store(_params + (i >> rshft), param_4, half_precision); -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - if (dev_params) { - simd_store(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); - } -#endif - simd_store(_exp_avg + i, momentum_4, false); - } -#if defined(__ENABLE_CUDA__) - if (dev_params) { - if (half_precision) - launch_param_update_half( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - else - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - - _buf_index = !_buf_index; + simd_store(_params + i, param_4); + simd_store(_exp_avg + i, momentum_4); } -#elif defined(__ENABLE_CANN__) - if (dev_params) { - size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); - if (half_precision) memcpy_size /= 2; - aclrtMemcpy(dev_params + t, - memcpy_size, - _doubled_buffer[_buf_index], - memcpy_size, - aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); - - _buf_index = !_buf_index; - } -#endif } *rounded_size = new_rounded_size; } @@ -255,15 +164,4 @@ int ds_lion_step(int optimizer_id, torch::Tensor& grads, torch::Tensor& exp_avg); -int ds_lion_step_plus_copy(int optimizer_id, - size_t step, - float lr, - float beta1, - float beta2, - float weight_decay, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg, - torch::Tensor& gpu_params); - int destroy_lion_optimizer(int optimizer_id); diff --git a/csrc/includes/custom_cuda_layers.h b/csrc/includes/custom_cuda_layers.h index 265eb7b12444..21f19749d4cf 100644 --- a/csrc/includes/custom_cuda_layers.h +++ b/csrc/includes/custom_cuda_layers.h @@ -272,9 +272,6 @@ void launch_fuse_transpose_bias_kernel(const T* inp, int cols, cudaStream_t stream); -void launch_param_update(const float* input, __half* output, int size, cudaStream_t stream); -void launch_param_update_half(const float* input, __half* output, int size, cudaStream_t stream); - void launch_token_sort(int32_t* indices, int layers, int batch_size, diff --git a/csrc/includes/simd.h b/csrc/includes/simd.h index 59237b0261c1..73e41216a652 100644 --- a/csrc/includes/simd.h +++ b/csrc/includes/simd.h @@ -13,6 +13,19 @@ #define TILE (128 * 1024 * 1024) #if defined(__AVX512__) or defined(__AVX256__) +template +inline T readAs(const void* src) +{ + T res; + std::memcpy(&res, src, sizeof(T)); + return res; +} +template +inline void writeAs(void* dst, const T& val) +{ + 
std::memcpy(dst, &val, sizeof(T)); +} + #define ROUND_DOWN(size, step) ((size) & ~((step)-1)) #if defined(__AVX512__) @@ -30,11 +43,52 @@ #define SIMD_XOR(x, y) _mm512_xor_ps(x, y) #define SIMD_WIDTH 16 -#define SIMD_LOAD2(x, h) \ - ((h) ? _mm512_cvtph_ps(_mm256_castps_si256(_mm256_loadu_ps(x))) : _mm512_loadu_ps(x)) -#define SIMD_STORE2(x, d, h) \ - ((h) ? _mm256_store_ps(x, _mm256_castsi256_ps(_mm512_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \ - : _mm512_storeu_ps(x, d)) +static __m512 load_16_bf16_as_f32(const void* data) +{ + __m256i a = readAs<__m256i>(data); // use memcpy to avoid aliasing + __m512i b = _mm512_cvtepu16_epi32(a); // convert 8 u16 to 8 u32 + __m512i c = _mm512_slli_epi32(b, 16); // logical shift left of all u32 by + // 16 bits (representing bf16->f32) + return readAs<__m512>(&c); // use memcpy to avoid aliasing +} + +static void store_16_f32_as_bf16_nearest(__m512 v, void* data) +{ + __m512i u32 = readAs<__m512i>(&v); + + // flow assuming non-nan: + + // uint32_t rounding_bias = ((U32 >> 16) & 1) + UINT32_C(0x7FFF); + __m512i b = _mm512_srli_epi32(u32, 16); + __m512i lsb_mask = _mm512_set1_epi32(0x00000001); + __m512i c = _mm512_and_si512(b, lsb_mask); + __m512i bias_constant = _mm512_set1_epi32(0x00007fff); + __m512i rounding_bias = _mm512_add_epi32(c, bias_constant); + + // uint16_t res = static_cast((U32 + rounding_bias) >> 16); + __m512i d = _mm512_add_epi32(u32, rounding_bias); + __m512i e = _mm512_srli_epi32(d, 16); + __m256i non_nan_res = _mm512_cvtusepi32_epi16(e); + + // handle nan (exp is all 1s and mantissa != 0) + // if ((x & 0x7fffffffU) > 0x7f800000U) + __m512i mask_out_sign = _mm512_set1_epi32(0x7fffffff); + __m512i non_sign_bits = _mm512_and_si512(u32, mask_out_sign); + __m512i nan_threshold = _mm512_set1_epi32(0x7f800000); + __mmask16 nan_mask = _mm512_cmp_epi32_mask(non_sign_bits, nan_threshold, _MM_CMPINT_GT); + + // mix in results with nans as needed + __m256i nans = _mm256_set1_epi16(0x7fc0); + __m256i res = _mm256_mask_mov_epi16(non_nan_res, nan_mask, nans); + + writeAs(data, res); +} +#define SIMD_LOAD_BF16(x) load_16_bf16_as_f32(x) +#define SIMD_STORE_BF16(x, d) store_16_f32_as_bf16_nearest(d, x) + +#define SIMD_LOAD_FP16(x) _mm512_cvtph_ps(_mm256_castps_si256(_mm256_loadu_ps(x))) +#define SIMD_STORE_FP16(x, d) \ + _mm256_store_ps(x, _mm256_castsi256_ps(_mm512_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) #define INTV __m256i #elif defined(__AVX256__) @@ -52,11 +106,11 @@ #define SIMD_XOR(x, y) _mm256_xor_ps(x, y) #define SIMD_WIDTH 8 -#define SIMD_LOAD2(x, h) \ - ((h) ? _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)(x))) : _mm256_loadu_ps(x)) -#define SIMD_STORE2(x, d, h) \ - ((h) ? _mm_store_ps(x, _mm_castsi128_ps(_mm256_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \ - : _mm256_storeu_ps(x, d)) +#define SIMD_LOAD_BF16(x) static_assert(false && "AVX256 does not support BFloat16") +#define SIMD_STORE_BF16(x, d) static_assert(false && "AVX256 does not support BFloat16") +#define SIMD_LOAD_FP16(x) _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)x)) +#define SIMD_STORE_FP16(x, d) \ + _mm_store_ps(x, _mm_castsi128_ps(_mm256_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) #define INTV __m128i #endif @@ -70,20 +124,66 @@ union AVX_Data { // float data_f[16]; }; -template -inline void simd_store(float* dst, AVX_Data* src, bool half_precision) +template +inline typename std::enable_if_t, void> simd_store(T* dst, + AVX_Data* src) { - size_t width = (half_precision ? 
SIMD_WIDTH / 2 : SIMD_WIDTH); + size_t width = SIMD_WIDTH; #pragma unroll - for (size_t i = 0; i < span; ++i) { SIMD_STORE2(dst + width * i, src[i].data, half_precision); } + for (size_t i = 0; i < span; ++i) { SIMD_STORE_FP16((float*)(dst + width * i), src[i].data); } } -template -inline void simd_load(AVX_Data* dst, float* src, bool half_precision) + +template +inline typename std::enable_if_t, void> simd_store(T* dst, + AVX_Data* src) { - size_t width = (half_precision ? SIMD_WIDTH / 2 : SIMD_WIDTH); +#ifdef __AVX512__ + size_t width = SIMD_WIDTH; #pragma unroll - for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_LOAD2(src + width * i, half_precision); } + for (size_t i = 0; i < span; ++i) { SIMD_STORE_BF16((float*)(dst + width * i), src[i].data); } +#else + throw std::runtime_error("AVX512 required for BFloat16"); +#endif +} + +template +inline typename std::enable_if_t, void> simd_store(T* dst, AVX_Data* src) +{ + size_t width = SIMD_WIDTH; +#pragma unroll + for (size_t i = 0; i < span; ++i) { SIMD_STORE(dst + width * i, src[i].data); } } + +template +inline typename std::enable_if_t, void> simd_load(AVX_Data* dst, + T* src) +{ + size_t width = SIMD_WIDTH; +#pragma unroll + for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_LOAD_FP16((float*)(src + width * i)); } +} + +template +inline typename std::enable_if_t, void> simd_load(AVX_Data* dst, + T* src) +{ +#ifdef __AVX512__ + size_t width = SIMD_WIDTH; +#pragma unroll + for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_LOAD_BF16((float*)(src + width * i)); } +#else + throw std::runtime_error("AVX512 required for BFloat16"); +#endif +} + +template +inline typename std::enable_if_t, void> simd_load(AVX_Data* dst, T* src) +{ + size_t width = SIMD_WIDTH; +#pragma unroll + for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_LOAD(src + width * i); } +} + template inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data src_m_r, AVX_Data* src_a) { diff --git a/csrc/lion/cpu_lion.cpp b/csrc/lion/cpu_lion.cpp index a0562eac9c4a..c5cf3e9e9235 100644 --- a/csrc/lion/cpu_lion.cpp +++ b/csrc/lion/cpu_lion.cpp @@ -8,9 +8,6 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("lion_update", &ds_lion_step, "DeepSpeed CPU Lion update (C++)"); - m.def("lion_update_copy", - &ds_lion_step_plus_copy, - "DeepSpeed CPU Lion update and param copy (C++)"); m.def("create_lion", &create_lion_optimizer, "DeepSpeed CPU Lion (C++)"); m.def("destroy_lion", &destroy_lion_optimizer, "DeepSpeed CPU Lion destroy (C++)"); } diff --git a/csrc/lion/cpu_lion_impl.cpp b/csrc/lion/cpu_lion_impl.cpp index 28314cf5b6e1..85896ba86e19 100644 --- a/csrc/lion/cpu_lion_impl.cpp +++ b/csrc/lion/cpu_lion_impl.cpp @@ -6,34 +6,28 @@ #include #include #include +#include #include +#include #include #include #include #include "cpu_lion.h" -#if defined(__ENABLE_CUDA__) -#include -#include "cublas_v2.h" -#include "cuda.h" -#include "curand.h" -#include "custom_cuda_layers.h" -#endif - +using namespace std::string_literals; static std::unordered_map> s_optimizers; // C++ interface -void Lion_Optimizer::Step_1(float* _params, - float* grads, - float* _exp_avg, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) +template +void Lion_Optimizer::Step_1(ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + size_t _param_size) { size_t rounded_size = 0; #if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<1>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision); + 
Step_AVX<1>(&rounded_size, _params, grads, _exp_avg, _param_size); #endif if (_param_size > rounded_size) { float betta1_minus1 = 1 - _betta1; @@ -41,26 +35,15 @@ void Lion_Optimizer::Step_1(float* _params, float alpha = _alpha; float after_decay = 1 - alpha * _weight_decay; - ds_half_precision_t* grads_cast_h; - ds_half_precision_t* params_cast_h; - if (half_precision) { - grads_cast_h = reinterpret_cast(grads); - params_cast_h = reinterpret_cast(_params); - } for (size_t t = rounded_size; t < _param_size; t += TILE) { size_t copy_size = TILE; if ((t + TILE) > _param_size) copy_size = _param_size - t; size_t offset = copy_size + t; -#if defined(__ENABLE_CUDA__) - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } -#elif defined(__ENABLE_CANN__) - if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } -#endif #pragma omp parallel for for (size_t k = t; k < offset; k++) { - float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; - float param = half_precision ? (float)params_cast_h[k] : _params[k]; + float grad = (float)grads[k]; + float param = (float)_params[k]; float momentum = _exp_avg[k]; float tmp = momentum * _betta1; tmp = grad * betta1_minus1 + tmp; @@ -74,56 +57,28 @@ void Lion_Optimizer::Step_1(float* _params, } momentum = momentum * _betta2; momentum = grad * betta2_minus1 + momentum; -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - if (dev_params) _doubled_buffer[_buf_index][k - t] = param; -#endif - if (half_precision) - params_cast_h[k] = (ds_half_precision_t)param; - else - _params[k] = param; + _params[k] = param; _exp_avg[k] = momentum; } -#if defined(__ENABLE_CUDA__) - if (dev_params) { - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); - - _buf_index = !_buf_index; - } -#elif defined(__ENABLE_CANN__) - if (dev_params) { - size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); - aclrtMemcpy(dev_params + t, - memcpy_size, - _doubled_buffer[_buf_index], - memcpy_size, - aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); - - _buf_index = !_buf_index; - } -#endif } } } -void Lion_Optimizer::Step_4(float* _params, - float* grads, - float* _exp_avg, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) +template +void Lion_Optimizer::Step_4(ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + size_t _param_size) { size_t rounded_size = 0; #if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<4>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision); + Step_AVX<4>(&rounded_size, _params, grads, _exp_avg, _param_size); #endif if (_param_size > rounded_size) Step_1((_params + rounded_size), (grads + rounded_size), (_exp_avg + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); + (_param_size - rounded_size)); } int create_lion_optimizer(int optimizer_id, @@ -162,24 +117,76 @@ int create_lion_optimizer(int optimizer_id, return 0; } -void Lion_Optimizer::Step_8(float* _params, - float* grads, - float* _exp_avg, - size_t _param_size, - ds_half_precision_t* dev_params, - bool half_precision) +template +void Lion_Optimizer::Step_8(ds_params_percision_t* _params, + ds_params_percision_t* grads, + ds_state_precision_t* _exp_avg, + size_t _param_size) { size_t rounded_size = 0; #if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<8>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision); + Step_AVX<8>(&rounded_size, _params, grads, _exp_avg, _param_size); #endif if (_param_size > rounded_size) Step_4((_params + rounded_size), (grads + rounded_size), (_exp_avg + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? (dev_params + rounded_size) : dev_params), - half_precision); + (_param_size - rounded_size)); +} + +template +void step_invoker(std::shared_ptr opt, + void* _params, + void* grads, + void* _exp_avg, + size_t _param_size) +{ + opt->Step_8((ds_params_percision_t*)(_params), + (ds_params_percision_t*)(grads), + (ds_state_precision_t*)(_exp_avg), + _param_size); +} + +std::map, + std::function, void*, void*, void*, size_t)>> + invokers; + +// Fill map with template functions for each type +template +void create_invoker() +{ + invokers[std::tuple(c10::CppTypeToScalarType(), + c10::CppTypeToScalarType())] = + step_invoker; +} +struct InvokerInitializer { + InvokerInitializer() + { + create_invoker(); + create_invoker(); + create_invoker(); + create_invoker(); + create_invoker(); + } +} _invoker_initializer; + +void invoke(std::shared_ptr opt, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg, + size_t param_size) +{ + c10::ScalarType params_type = at::typeMetaToScalarType(params.options().dtype()); + c10::ScalarType state_type = at::typeMetaToScalarType(exp_avg.options().dtype()); + + auto it = invokers.find(std::tuple(params_type, state_type)); + if (it == invokers.end()) { + throw std::runtime_error("Lion optimizer with param type "s + c10::toString(params_type) + + " and state type "s + c10::toString(state_type) + + " is not supported on current hardware"s); + } + + it->second(opt, params.data_ptr(), grads.data_ptr(), exp_avg.data_ptr(), param_size); } int ds_lion_step(int optimizer_id, @@ -196,67 +203,13 @@ int ds_lion_step(int optimizer_id, auto grads_c = grads.contiguous(); auto exp_avg_c = exp_avg.contiguous(); - // assert(params.options().dtype() == grads.options().dtype()); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); - std::shared_ptr opt = std::static_pointer_cast(s_optimizers[optimizer_id]); opt->IncrementStep(step, beta1, beta2); opt->update_state(lr, weight_decay); - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_ptr, - params_c.numel(), - nullptr, - (params.options().dtype() == at::kHalf)); + invoke(opt, params_c, grads_c, exp_avg_c, params_c.numel()); -#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - opt->SynchronizeStreams(); -#endif - return 0; -} - -int ds_lion_step_plus_copy(int optimizer_id, - size_t step, - float lr, - float beta1, - float beta2, - float weight_decay, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg, - torch::Tensor& gpu_params) -{ -#if 
defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) - auto params_c = params.contiguous(); - auto gpu_params_c = gpu_params.contiguous(); - auto exp_avg_c = exp_avg.contiguous(); - auto grads_c = grads.contiguous(); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - ds_half_precision_t* gpu_params_ptr = (ds_half_precision_t*)gpu_params_c.data_ptr(); - float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); - - std::shared_ptr opt = - std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step, beta1, beta2); - opt->update_state(lr, weight_decay); - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_ptr, - params_c.numel(), - gpu_params_ptr, - (params.options().dtype() == at::kHalf)); - - opt->SynchronizeStreams(); -#else - assert(false); -#endif return 0; } diff --git a/csrc/transformer/inference/csrc/pt_binding.cpp b/csrc/transformer/inference/csrc/pt_binding.cpp index b7277d1e1678..ea09017680af 100644 --- a/csrc/transformer/inference/csrc/pt_binding.cpp +++ b/csrc/transformer/inference/csrc/pt_binding.cpp @@ -446,15 +446,16 @@ std::vector ds_softmax_context(at::Tensor& query_key_value, unsigned layer_id, unsigned num_layers, at::Tensor& alibi, - float rope_theta) + float rope_theta, + bool is_prompt, + std::optional token_idx, + std::optional position_ids) { unsigned bsz = query_key_value.size(0); unsigned seq_len = query_key_value.size(1); int k = query_key_value.size(2) / (heads + 2 * (num_kv > 0 ? num_kv : heads)); unsigned hidden_dim = heads * k; - bool is_prompt = (seq_len > 1); - if (is_prompt) InferenceContext::Instance().reset_tokens(seq_len); unsigned soft_len = InferenceContext::Instance().current_tokens(); @@ -2010,7 +2011,7 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) "DeepSpeed memory allocation for GPT inference with " #_name " (CUDA)"); \ m.def("dequantize_" #_name, \ &ds_dequantize<_dtype>, \ - "DeepSpeed dequantize with " #_name " (CUDA)") + "DeepSpeed dequantize with " #_name " (CUDA)"); DEF_OPS(fp32, float); DEF_OPS(fp16, __half); diff --git a/deepspeed/__init__.py b/deepspeed/__init__.py index 6c7aa8b15ef9..f1d99e1b0e43 100755 --- a/deepspeed/__init__.py +++ b/deepspeed/__init__.py @@ -42,7 +42,6 @@ from .comm.comm import init_distributed from .runtime import zero -from .runtime import DeepSpeedOptimizer, ZeROOptimizer from .runtime.compiler import is_compile_supported from .pipe import PipelineModule diff --git a/deepspeed/checkpoint/__init__.py b/deepspeed/checkpoint/__init__.py index c9822693867d..1f645a494ade 100644 --- a/deepspeed/checkpoint/__init__.py +++ b/deepspeed/checkpoint/__init__.py @@ -15,6 +15,6 @@ from .zero_checkpoint import ZeROCheckpoint -from .universal_checkpoint import enable_universal_checkpoint +from .universal_checkpoint import enable_universal_checkpoint, SubparamShape from .constants import * diff --git a/deepspeed/checkpoint/constants.py b/deepspeed/checkpoint/constants.py index f809a0c39270..046bc242002f 100644 --- a/deepspeed/checkpoint/constants.py +++ b/deepspeed/checkpoint/constants.py @@ -16,6 +16,7 @@ BASE_OPTIMIZER_STATE = 'base_optimizer_state' BASE_OPTIMIZER_STATE_STEP = 'base_optimizer_state_step' SINGLE_PARTITION_OF_FP32_GROUPS = "single_partition_of_fp32_groups" +PARAM_GROUPS = 'param_groups' GROUP_PADDINGS = 'group_paddings' PARTITION_COUNT = 'partition_count' ZERO_STAGE = 'zero_stage' @@ -73,6 +74,8 @@ # Similarly, load_hp_checkpoint_state has to take the needed actions when loading from universal. 
PARAM_N_SUB_PARAMS = "param_n_sub_params" +SUB_PARAM_SHAPE = "sub_param_shape" + # Regex list of parameters that require special handling VOCABULARY_PARAMETER_PATTERNS = 'vocabulary_parameter_patterns' PIPELINE_REPLICATED_PARAMETER_PATTERNS = 'pipeline_replicated_parameter_patterns' @@ -80,3 +83,5 @@ PARAMETER_WITH_ROW_PARALLELISM_PATTERNS = 'parameter_with_row_parallelism_patterns' TP_REPLICATED_PARAMETER_PATTERNS = 'tp_replicated_parameter_patterns' PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0 = 'parameter_with_2_sub_params_cat_dim_0' +PARAMETER_WITH_SUB_PARAMS = 'parameter_with_sub_params' +SUB_PARAMS_SHAPE = 'sub_params_shape' diff --git a/deepspeed/checkpoint/deepspeed_checkpoint.py b/deepspeed/checkpoint/deepspeed_checkpoint.py index 8312dddd2fa6..c2d6f475e979 100644 --- a/deepspeed/checkpoint/deepspeed_checkpoint.py +++ b/deepspeed/checkpoint/deepspeed_checkpoint.py @@ -4,6 +4,7 @@ # DeepSpeed Team import os +import re from typing import Dict import torch @@ -32,7 +33,13 @@ class DeepSpeedCheckpoint(object): - def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None): + def __init__(self, + dir, + tp_degree=None, + pp_degree=None, + dp_degree=None, + final_layer_norm_idx=FINAL_LAYER_NORM_INDEX): + self.final_layer_norm_idx = final_layer_norm_idx self.dir = dir pipeline_parallel = len(get_files_with_prefix(get_files(dir), LAYER_FILE_PREFIX)) > 0 @@ -73,7 +80,7 @@ def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None): self.pp_to_transformer_map = self._build_pp_transformer_map() self.transformer_file_map = self._build_transformer_file_map() self.tp_to_embedding_map = self._build_tp_other_layer_map(EMBEDDING_LAYER_INDEX) - self.tp_to_final_norm_map = self._build_tp_other_layer_map(FINAL_LAYER_NORM_INDEX) + self.tp_to_final_norm_map = self._build_tp_other_layer_map(self.final_layer_norm_idx) self._build_global_state() def is_change_tp_degree(self): @@ -125,7 +132,7 @@ def get_embedding_layer_id(self): return self.layer_keys[EMBEDDING_LAYER_INDEX] def get_final_norm_layer_id(self): - return self.layer_keys[FINAL_LAYER_NORM_INDEX] + return self.layer_keys[self.final_layer_norm_idx] def get_iteration(self): if not ITERATION_KEY in self.global_state: @@ -214,7 +221,7 @@ def get_2d_parallel_files(self, tp_index: int, pp_index: int) -> list: def _build_pp_transformer_map(self): data_map = {} if self.pp_degree > 0: - transformer_layers = self.layer_keys[1:-1] + transformer_layers = self.layer_keys[1:self.final_layer_norm_idx] layers_per_pp = len(transformer_layers) // self.pp_degree data_map = { i: transformer_layers[i * layers_per_pp:(i + 1) * layers_per_pp] @@ -229,7 +236,7 @@ def _dump_mapping(self, data_map, map_tag=None): print(f'{k} = {v}') def _build_transformer_file_map(self): - transformer_layer_keys = self.layer_keys[1:-1] + transformer_layer_keys = self.layer_keys[1:self.final_layer_norm_idx] file_map = {} # XXX: this is not guaranteed layers_per_pp = 1 @@ -238,7 +245,7 @@ def _build_transformer_file_map(self): #print(f"{transformer_layer_keys} {layers_per_pp}") for key_index, layer_key in enumerate(transformer_layer_keys): pp_index = key_index // layers_per_pp - layer_files = get_files_with_prefix(self.layer_files, layer_key) + layer_files = get_files_with_prefix(self.layer_files, layer_key + '-') layer_file_partitions = partition_data(layer_files, self.tp_degree) for tp_index in range(self.tp_degree): map_key = (tp_index, pp_index) @@ -263,11 +270,13 @@ def validate_files(self): def _get_layer_keys(self): key_set = set() - key_len = len(LAYER_FILE_PREFIX) 
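The switch to matching on layer_key + '-' avoids prefix collisions between layer ids. A small illustration with made-up file names and a simplified stand-in for get_files_with_prefix:

files = ["layer_1-model_00-model_states.pt", "layer_10-model_00-model_states.pt"]

def get_files_with_prefix(all_files, prefix):
    return [f for f in all_files if f.startswith(prefix)]

print(get_files_with_prefix(files, "layer_1"))   # both files match -> ambiguous
print(get_files_with_prefix(files, "layer_1-"))  # only the intended layer matches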
+ 2 for file_path in self.layer_files: _, fname = os.path.split(file_path) - key_set.add(fname[:key_len]) - return sorted(list(key_set)) + layer_id = re.search(r'layer_(\d+)-model_.*', fname).group(1) + key_set.add(layer_id) + sorted_ids = sorted(list(key_set), key=int) + layer_keys = [LAYER_FILE_PREFIX + str(layer_id) for layer_id in sorted_ids] + return layer_keys def _merge_state_dicts(self, sd_list): merged_sd = {} diff --git a/deepspeed/checkpoint/ds_to_universal.py b/deepspeed/checkpoint/ds_to_universal.py index f40c5630899d..b1a8276589b6 100755 --- a/deepspeed/checkpoint/ds_to_universal.py +++ b/deepspeed/checkpoint/ds_to_universal.py @@ -6,10 +6,11 @@ # DeepSpeed Team from functools import partial +from itertools import chain import argparse import glob import itertools -import multiprocessing +from concurrent.futures import ProcessPoolExecutor import os import re import shutil @@ -22,11 +23,13 @@ OPTIMIZER_STATE_DICT, BASE_OPTIMIZER_STATE, SINGLE_PARTITION_OF_FP32_GROUPS, + PARAM_GROUPS, PARAM_SLICE_MAPPINGS, PARAM_SHAPES, PARAM, CAT_DIM, PARAM_N_SUB_PARAMS, + SUB_PARAM_SHAPE, VOCAB_TENSOR, UNIVERSAL_CHECKPOINT_INFO, VOCABULARY_PARAMETER_PATTERNS, @@ -35,6 +38,8 @@ PARAMETER_TO_AVERAGE_PATTERNS, PARAMETER_WITH_ROW_PARALLELISM_PATTERNS, PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0, + PARAMETER_WITH_SUB_PARAMS, + SubparamShape, ) @@ -110,6 +115,9 @@ def extract_zero_shards(dir, ds_checkpoint, indices_3D): fp32=fp32_groups[param_group_id], ) + if "step" in state_groups[param_group_id]: + flat_state["step"] = state_groups[param_group_id]["step"] + for name, fragment_mapping in param_slice_mappings[param_group_id].items(): if pp_index > 0 and any(re.match(pattern, name) for pattern in pipeline_replicated_params): # Skip tied weights that are replicated in first and last pp stages @@ -124,6 +132,10 @@ def extract_zero_shards(dir, ds_checkpoint, indices_3D): cnt = 0 +def dp_index_to_str(dp_index): + return f"{dp_index:0>2d}" + + def dump_param_fragment(dir, tp_index, dp_index, state_name, state_flat_tensor, param_name, offset, numel): global cnt # temp hack @@ -132,23 +144,44 @@ def dump_param_fragment(dir, tp_index, dp_index, state_name, state_flat_tensor, os.makedirs(param_base_path, exist_ok=True) cnt += 1 - counter = f"{dp_index:0>2d}" - path = os.path.join(param_base_path, f"{state_name}.{counter}") + path = os.path.join(param_base_path, f"{state_name}.{dp_index_to_str(dp_index)}") #print(f"{param_name}: {offset}: {numel} => {path}") - t = state_flat_tensor.narrow(0, offset, numel).clone() - _save_checkpoint(path, t) + # State might be a python int or a tensor + if state_name != "step" and torch.is_tensor(state_flat_tensor): + state_flat_tensor = state_flat_tensor.narrow(0, offset, numel).clone() + _save_checkpoint(path, state_flat_tensor) def _merge_zero_shards(param_base_path, state, tp_degree, slice_shape): slices = [] for tp_index in range(tp_degree): prefix_path = os.path.join(param_base_path, str(tp_index), f"{state}") - paths = sorted(list(glob.glob(f"{prefix_path}.*"))) + paths = glob.glob(f"{prefix_path}.*") + + if len(paths) == 0: + continue + + pattern = re.compile(f"{prefix_path}\\.([0-9]+)") + dp_indices = set() + for p in paths: + m = pattern.match(p) + if m: + dp_indices.add(int(m.group(1))) + else: + raise ValueError(f"Cannot parse dp_rank from {p}") + + paths = [f"{prefix_path}.{dp_index_to_str(dp_index)}" for dp_index in sorted(list(dp_indices))] shards = [torch.load(p) for p in paths] - slice = torch.cat(shards, dim=0).reshape(slice_shape) + + if state == "step": + 
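A small round-trip of the zero-padded dp-rank suffix written by dp_index_to_str and the regex used to recover it in _merge_zero_shards; the "exp_avg" base name is only an example.

import re

def dp_index_to_str(dp_index):
    return f"{dp_index:0>2d}"

paths = [f"exp_avg.{dp_index_to_str(i)}" for i in (0, 3, 12)]   # exp_avg.00, exp_avg.03, exp_avg.12
pattern = re.compile(r"exp_avg\.([0-9]+)")
dp_indices = sorted(int(pattern.match(p).group(1)) for p in paths)
print(dp_indices)  # [0, 3, 12] -- numeric sort, not lexicographic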
assert all(v == shards[0] for v in shards), "All shards must have the same step value" + slice = shards[0] + else: + slice = torch.cat(shards, dim=0).reshape(slice_shape) + slices.append(slice) return slices @@ -165,8 +198,11 @@ def merge_tp_slices(ds_checkpoint, dir, slice_dir, tp_degree, name_and_shape): parameters_with_row_parallelism = universal_checkpoint_info.get(PARAMETER_WITH_ROW_PARALLELISM_PATTERNS, []) vocabulary_parameters = universal_checkpoint_info.get(VOCABULARY_PARAMETER_PATTERNS, []) parameters_with_2_sub_params_cat_dim_0 = universal_checkpoint_info.get(PARAMETER_WITH_2_SUB_PARAMS_CAT_DIM_0, []) + parameter_with_sub_params = universal_checkpoint_info.get(PARAMETER_WITH_SUB_PARAMS, []) + unmatched_patterns = set(replicated_parameters + parameters_to_average + parameters_with_row_parallelism + vocabulary_parameters + parameters_with_2_sub_params_cat_dim_0) + unmatched_patterns.update(chain.from_iterable(SubparamShape(**s).patterns for s in parameter_with_sub_params)) def get_matched_pattern(patterns_, name_): matched_ = [pattern_ for pattern_ in patterns_ if re.match(pattern_, name_)] @@ -177,6 +213,21 @@ def get_matched_pattern(patterns_, name_): return pattern_ return None + def get_matched_sub_params_pattern(name_): + for subparam_shape_dict in parameter_with_sub_params: + subparam_shape = SubparamShape(**subparam_shape_dict) + for pattern_ in subparam_shape.patterns: + if re.match(pattern_, name_): + unmatched_patterns.discard(pattern_) + return subparam_shape + return None + + matched_sub_params_shape = get_matched_sub_params_pattern(name) + + step_merged = _merge_zero_shards(slice_base_path, "step", tp_degree, shape) + if step_merged: + _save_checkpoint(os.path.join(param_base_path, f"step.pt"), step_merged[0]) + for state in ("fp32", "exp_avg", "exp_avg_sq"): slices = _merge_zero_shards(slice_base_path, state, tp_degree, shape) final_path = os.path.join(param_base_path, f"{state}.pt") @@ -200,6 +251,26 @@ def get_matched_pattern(patterns_, name_): param = torch.cat([merged_chunks_0, merged_chunks_1], dim=cat_dim) ckpt_dict[CAT_DIM] = cat_dim ckpt_dict[PARAM_N_SUB_PARAMS] = 2 + elif matched_sub_params_shape: + merged_chunks = [] + partition_dim = matched_sub_params_shape.partition_dim + + sub_dim_sizes = matched_sub_params_shape.shape[partition_dim] + if not isinstance(sub_dim_sizes, tuple): + sub_dim_sizes = (sub_dim_sizes, ) + + partition_shape = [sum(d) if isinstance(d, tuple) else d for d in matched_sub_params_shape.shape] + partition_shape = [d // tp_degree if i == partition_dim else d for i, d in enumerate(partition_shape)] + slices = [s.view(partition_shape) for s in slices] + + offset = 0 + for sub_dim_size in sub_dim_sizes: + part_sub_dim_size = sub_dim_size // tp_degree + merged_chunks.append( + torch.cat([s.narrow(partition_dim, offset, part_sub_dim_size) for s in slices], dim=partition_dim)) + offset += part_sub_dim_size + param = torch.cat(merged_chunks, dim=partition_dim) + ckpt_dict[SUB_PARAM_SHAPE] = matched_sub_params_shape else: cat_dim = 1 if get_matched_pattern(parameters_with_row_parallelism, name) else 0 # print(f"merge {name} with CAT DIM: {cat_dim}") @@ -221,19 +292,18 @@ def get_matched_pattern(patterns_, name_): return unmatched_patterns -def _get_chunks(l, n): - for i in range(0, len(l), n): - yield l[i:i + n] - - def _do_parallel_work(do_work, work_chunks, num_workers): - pool = multiprocessing.Pool(num_workers) results = [] - for batch in tqdm.tqdm(work_chunks): - res = pool.map(do_work, batch) - results.extend(res) - pool.close() - 
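A toy version of the sub-param merge path added above, for a fused parameter whose partition dimension concatenates two sub-params; tp_degree and the shapes are illustrative.

import torch

tp_degree = 2
partition_dim = 0
sub_dim_sizes = (4, 4)   # full sizes of the two sub-params along the partition dim

# each TP slice holds (4 + 4) // tp_degree = 4 rows: 2 rows of sub-param A, then 2 of sub-param B
slices = [torch.full((4, 3), float(r)) for r in range(tp_degree)]

merged_chunks, offset = [], 0
for sub_dim_size in sub_dim_sizes:
    part = sub_dim_size // tp_degree
    merged_chunks.append(
        torch.cat([s.narrow(partition_dim, offset, part) for s in slices], dim=partition_dim))
    offset += part
param = torch.cat(merged_chunks, dim=partition_dim)  # shape (8, 3): all of sub-param A, then all of B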
pool.join() + if num_workers > 1: + with ProcessPoolExecutor(max_workers=num_workers) as executor: + future_list = [executor.submit(do_work, work) for work in work_chunks] + for f in tqdm.tqdm(future_list): + results.append(f.result()) + else: + # No parallel pass for unit testing + # We can't create child processes in tests + for work in tqdm.tqdm(work_chunks): + results.append(do_work(work)) return results @@ -242,20 +312,15 @@ def _extract_zero_shard_files(args, ds_checkpoint, temp_dir): itertools.product(range(ds_checkpoint.pp_degree), range(ds_checkpoint.tp_degree), range(ds_checkpoint.dp_degree))) #pprint(f'{_3d_range_list=}') - work_chunks = list(_get_chunks(_3d_range_list, args.num_extract_workers)) - #pprint(f'{work_chunks=}') - # extract_zero_shards(temp_dir, ds_checkpoint, _3d_range_list[0]) do_work = partial(extract_zero_shards, temp_dir, ds_checkpoint) - _do_parallel_work(do_work, work_chunks, args.num_extract_workers) + _do_parallel_work(do_work, _3d_range_list, args.num_extract_workers) def _merge_tp_slice_files(args, ds_checkpoint, slice_shapes, temp_dir): - work_chunks = list(_get_chunks(list(slice_shapes.items()), args.num_merge_workers)) - #pprint(work_chunks) zero_output_folder = os.path.join(args.output_folder, "zero") do_work = partial(merge_tp_slices, ds_checkpoint, zero_output_folder, temp_dir, ds_checkpoint.tp_degree) - unmatched_patterns_lists = _do_parallel_work(do_work, work_chunks, args.num_merge_workers) + unmatched_patterns_lists = _do_parallel_work(do_work, list(slice_shapes.items()), args.num_merge_workers) # verify that all patterns were used # if a pattern was not used by any of the workers, then it was not used at all -> assert/alert @@ -273,6 +338,7 @@ def _save_optimizer_state(args, ds_checkpoint): optim_sd = sd[OPTIMIZER_STATE_DICT] output_sd = {k: v for k, v in optim_sd.items() if k not in sharded_states} + output_sd[PARAM_GROUPS] = optim_sd[BASE_OPTIMIZER_STATE][PARAM_GROUPS] zero_output_folder = os.path.join(args.output_folder, "zero") output_file_path = os.path.join(zero_output_folder, f"optimizer_state.pt") _save_checkpoint(output_file_path, output_sd) @@ -283,10 +349,9 @@ def _check_for_required_state(ds_checkpoint): assert universal_checkpoint_info is not None, f'Required {UNIVERSAL_CHECKPOINT_INFO} state is missing in checkpoint. Verify that client creates this state.' 
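The ProcessPoolExecutor pattern above, reduced to its essentials. Note that the work function must be picklable (defined at module level), and the serial branch is the fallback used when child processes cannot be created, e.g. in unit tests.

from concurrent.futures import ProcessPoolExecutor

def square(x):  # module-level so worker processes can pickle it
    return x * x

def do_parallel_work(do_work, work_items, num_workers):
    if num_workers > 1:
        with ProcessPoolExecutor(max_workers=num_workers) as executor:
            futures = [executor.submit(do_work, item) for item in work_items]
            return [f.result() for f in futures]
    # serial fallback when spawning worker processes is not possible
    return [do_work(item) for item in work_items]

if __name__ == "__main__":
    print(do_parallel_work(square, list(range(8)), num_workers=4))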
-def main(): +def main(args): print(f'Convert DeepSpeed Checkpoint to Universal Checkpoint') - args = parse_arguments() print(f'Converting DeepSpeed checkpoint in {args.input_folder} to Universal checkpoint in {args.output_folder}') ds_checkpoint = DeepSpeedCheckpoint(args.input_folder) @@ -332,4 +397,5 @@ def main(): if __name__ == "__main__": - main() + args = parse_arguments() + main(args) diff --git a/deepspeed/checkpoint/reshape_utils.py b/deepspeed/checkpoint/reshape_utils.py index 15b6ce28b2fd..137607721ebf 100644 --- a/deepspeed/checkpoint/reshape_utils.py +++ b/deepspeed/checkpoint/reshape_utils.py @@ -4,9 +4,10 @@ # DeepSpeed Team import os +import re import torch from collections import OrderedDict -from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX) +from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX, MODEL_FILE_PREFIX) def basic_folder_validation(dir): @@ -38,12 +39,28 @@ def get_files(dir): return file_list +def sort_zero_files(files, prefix): + pattern = f"{prefix}([0-9]+)_{MODEL_FILE_PREFIX}([0-9]+)" + rank_pairs = [] + for f in files: + m = re.search(pattern, f) + if m: + dp_rank = int(m.group(1)) + mp_rank = int(m.group(2)) + rank_pairs.append((dp_rank, mp_rank, f)) + else: + raise ValueError(f"Cannot parse dp_rank and mp_rank from {f}") + + sorted_files = sorted(rank_pairs, key=lambda x: (x[0], x[1])) + return [f for _, _, f in sorted_files] + + def get_zero_files(dir): file_list = get_files(dir) for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]: zero_files = get_files_with_prefix(file_list, prefix) if len(zero_files) > 0: - return zero_files + return sort_zero_files(zero_files, prefix) return [] diff --git a/deepspeed/checkpoint/universal_checkpoint.py b/deepspeed/checkpoint/universal_checkpoint.py index 542d1125c566..064891a8bb54 100644 --- a/deepspeed/checkpoint/universal_checkpoint.py +++ b/deepspeed/checkpoint/universal_checkpoint.py @@ -4,23 +4,42 @@ # DeepSpeed Team import os +import re import torch import types -from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_TENSOR, CAT_DIM, PARAM_N_SUB_PARAMS) +from typing import List, Tuple, Union +from dataclasses import dataclass +from .constants import (FP32_WEIGHT_KEY, PARAM, VOCAB_TENSOR, CAT_DIM, PARAM_N_SUB_PARAMS, SUB_PARAM_SHAPE) + + +@dataclass +class SubparamShape: + patterns: List[str] + shape: Tuple[Union[Tuple[int], int]] + partition_dim: int def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size): hp_mapping = self._hp_mapping - optim_state_keys = hp_mapping.get_optim_state_keys() - hp_keys = [FP32_WEIGHT_KEY] + optim_state_keys - #print(f'{hp_keys=}') - checkpoint_files = {key: os.path.join(folder, f"{key}.pt") for key in hp_keys} - for file in checkpoint_files.values(): - assert os.path.isfile(file), f'{file} is not a valid file' + hp_mapping.optim_fragment = {} + + hp_keys = [] + for file in os.listdir(folder): + # We expect files named something like "exp_avg.pt", "exp_avg_sq.pt", "fp32.pt" + pattern = r'(.+).pt' + match = re.search(pattern, file) + if match: + hp_keys.append(match.group(1)) + step = None for key in hp_keys: - ckpt_file = checkpoint_files[key] + ckpt_file = os.path.join(folder, f"{key}.pt") ckpt_dict = torch.load(ckpt_file) + + if key == "step": + step = ckpt_dict + continue + full_hp_param = ckpt_dict[PARAM] # need to deal with slices that were averaged. 
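For orientation, a hypothetical SubparamShape entry as a conversion script might configure it for a fused projection whose first dimension concatenates two sub-params; the pattern and sizes are invented for illustration, and the dataclass is redefined locally so the snippet stands alone.

from dataclasses import dataclass
from typing import List, Tuple, Union

@dataclass
class SubparamShape:  # mirrors the dataclass added above
    patterns: List[str]
    shape: Tuple[Union[Tuple[int], int]]
    partition_dim: int

fused_mlp = SubparamShape(
    patterns=[r".*mlp\.gate_up_proj\.weight"],  # regexes matched against parameter names
    shape=((11008, 11008), 4096),               # the partitioned dim holds a tuple of sub-param sizes
    partition_dim=0,
)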
@@ -62,17 +81,36 @@ def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size): assert full_param_numel == tp_world_size * tp_slice_numel, \ f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}' - dst_tensor = hp_mapping.hp_fragment if key == FP32_WEIGHT_KEY else hp_mapping.get_optim_state_fragment(key) # print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}") # print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}") + sub_param_shape = ckpt_dict.get(SUB_PARAM_SHAPE, None) # since when we do many to 1 on tp we cat sometimes on dim=0 and other times on dim=1 we have to do exactly the same in reverse # special case is when a single parameter is effectively a container for multiple sub parameters # (more details at PARAM_N_SUB_PARAMS definition) chunk_dim = ckpt_dict.get(CAT_DIM, 0) n_sub_params = ckpt_dict.get(PARAM_N_SUB_PARAMS, 1) - if n_sub_params > 1: + if sub_param_shape: + partition_dim = sub_param_shape.partition_dim + sub_dim_sizes = sub_param_shape.shape[partition_dim] + if not isinstance(sub_dim_sizes, tuple): + sub_dim_sizes = (sub_dim_sizes, ) + + partition_shape = [sum(d) if isinstance(d, tuple) else d for d in sub_param_shape.shape] + full_hp_param = full_hp_param.view(partition_shape) + + offset = 0 + merged_chunks = [] + for sub_dim_size in sub_dim_sizes: + sub_params_tp_slice = full_hp_param.narrow(partition_dim, + offset, sub_dim_size).chunk(tp_world_size, + dim=partition_dim)[tp_rank] + merged_chunks.append(sub_params_tp_slice) + offset += sub_dim_size + tp_hp_slice = torch.cat(merged_chunks, dim=partition_dim) + + elif n_sub_params > 1: sub_params = full_hp_param.chunk(n_sub_params, dim=chunk_dim) sub_params_tp_slice = [p.chunk(tp_world_size, dim=chunk_dim)[tp_rank] for p in sub_params] tp_hp_slice = torch.cat(sub_params_tp_slice, dim=chunk_dim) @@ -84,13 +122,23 @@ def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size): lp_frag_address = hp_mapping.lp_fragment_address tp_hp_fragment = tp_hp_slice.narrow(0, lp_frag_address.start, lp_frag_address.numel) - assert dst_tensor.numel() == lp_frag_address.numel, \ - f'Load checkpoint {key} dst_tensor numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}' # print(f"{key} SHAPE: {tp_hp_slice.shape=}") # print(f"{key} SHAPE: {dst_tensor.shape=}") # print(f"{key} SHAPE: {tp_hp_fragment.shape=}") - dst_tensor.data.copy_(tp_hp_fragment.data) + + if key == FP32_WEIGHT_KEY: + dst_tensor = hp_mapping.get_hp_fragment() + assert dst_tensor.numel() == lp_frag_address.numel, \ + f'Load checkpoint {key} dst numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}' + dst_tensor.data.copy_(tp_hp_fragment.data) + else: + assert tp_hp_fragment.numel() == lp_frag_address.numel, \ + f'Load checkpoint {key} dst numel {tp_hp_fragment.numel()} != src numel {lp_frag_address.numel}' + + hp_mapping.optim_fragment[key] = tp_hp_fragment.clone().detach() + + return step def enable_universal_checkpoint(param_list): diff --git a/deepspeed/checkpoint/zero_checkpoint.py b/deepspeed/checkpoint/zero_checkpoint.py index c65745d3dd0c..c5d4102859fb 100644 --- a/deepspeed/checkpoint/zero_checkpoint.py +++ b/deepspeed/checkpoint/zero_checkpoint.py @@ -105,9 +105,11 @@ def _strip_tensor_paddings(self, sd): if group_paddings[key] == 0: continue for state_name, state_value in group_state.items(): - if torch.is_tensor(state_value): + if state_name != "step" and torch.is_tensor(state_value) and state_value.dim(): raw_length = state_value.numel() - 
group_paddings[key] group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone() + else: + group_state[state_name] = state_value def _clear_group_paddings(self, sd): group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS) diff --git a/deepspeed/comm/torch.py b/deepspeed/comm/torch.py index d96c63d84e60..4ddba9f07441 100644 --- a/deepspeed/comm/torch.py +++ b/deepspeed/comm/torch.py @@ -158,7 +158,6 @@ def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, asyn op = self._reduce_op(op) return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op) - @compiler.disable def inference_all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False): op = self._reduce_op(op) return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op) diff --git a/deepspeed/elasticity/elastic_agent.py b/deepspeed/elasticity/elastic_agent.py index c1e8932ecaba..039b999dfeca 100644 --- a/deepspeed/elasticity/elastic_agent.py +++ b/deepspeed/elasticity/elastic_agent.py @@ -6,7 +6,7 @@ from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent from typing import Any, Dict, Optional, Tuple from datetime import datetime -from torch.distributed.elastic.agent.server.api import log, _get_socket_with_port +from torch.distributed.elastic.utils.distributed import get_free_port from torch.distributed.elastic.metrics import put_metric from torch.distributed.elastic.agent.server.api import ( RunResult, @@ -24,6 +24,10 @@ from contextlib import closing import subprocess +from torch.distributed.elastic.utils.logging import get_logger + +log = get_logger(__name__) + class DSElasticAgent(LocalElasticAgent): @@ -44,7 +48,7 @@ def _set_master_addr_port(store: Store, master_port: Optional[int], local_addr: Optional[str] = None): if master_port is None: - sock = _get_socket_with_port() + sock = get_free_port() with closing(sock): master_port = sock.getsockname()[1] diff --git a/deepspeed/inference/engine.py b/deepspeed/inference/engine.py index 5cdd99ff0b90..bc6a5faa45ff 100755 --- a/deepspeed/inference/engine.py +++ b/deepspeed/inference/engine.py @@ -13,6 +13,7 @@ from packaging import version as pkg_version from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine from deepspeed.utils.timer import SynchronizedWallClockTimer +from deepspeed.runtime.compiler import is_compile_supported from ..runtime.state_dict_factory import SDLoaderFactory from ..runtime.weight_quantizer import WeightQuantization @@ -29,6 +30,7 @@ from ..module_inject.auto_tp_model_utils import build_bloom_alibi_tensor, build_mpt_atten_bias_tensor, build_mpt_alibi_tensor, get_alibi_mask from ..ops.transformer.inference.ds_attention import DeepSpeedSelfAttention from ..model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference +from ..ops.transformer.inference.op_binding.workspace import WorkspaceOp DS_INFERENCE_ENABLED = False from torch import nn @@ -51,13 +53,8 @@ def __init__(self, model, config): DS_INFERENCE_ENABLED = True super().__init__() - - # Have to import here because inference_module is a global, but python - # globals only work at the module level and will not be updated unless - # we import it each time we init a new inference engine. 
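The extra guards added to _strip_tensor_paddings matter because the new "step" state may be a python int or a 0-dim tensor, and torch.narrow needs at least one dimension; a quick illustration with made-up values:

import torch

flat_state = torch.arange(10, dtype=torch.float32)   # padded flat optimizer state
padding = 2
print(torch.narrow(flat_state, 0, 0, flat_state.numel() - padding).shape)  # torch.Size([8])

step = torch.tensor(1500)      # 0-dim: step.dim() == 0, so it is passed through unchanged
# torch.narrow(step, 0, 0, 1)  # would raise -- 0-dim tensors cannot be narrowed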
- from ..model_implementations.transformers.ds_transformer import inference_module - if inference_module is not None: - self.destroy() + self.workspace = WorkspaceOp() + self.destroy() self.module = model self._config = config @@ -180,22 +177,19 @@ def __init__(self, model, config): dist.broadcast(_rng_state, 0) get_accelerator().set_rng_state(_rng_state.cpu()) - if config.tensor_parallel.tp_size > 1: + if config.enable_cuda_graph and get_accelerator().device_name() == 'hpu': + self.module = get_accelerator().wrap_in_hpu_graph(self.module) + elif config.tensor_parallel.tp_size > 1: assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism" # Check if local CUDA graphs can be created in replacement modules self.local_cuda_graph = self._local_cuda_graph_used(self.module) + self._is_compiled = False def destroy(self): - # Have to import here because inference_module is a global, but python - # globals only work at the module level and will not be updated unless - # we import it each time we init a new inference engine. - from ..model_implementations.transformers.ds_transformer import inference_module DeepSpeedTransformerInference.layer_id = 0 DeepSpeedSelfAttention.num_layers = 0 - if inference_module is not None: - inference_module.release_workspace() - inference_module = None + self.workspace.release_workspace() def profile_model_time(self, use_cuda_events=True): if not self.model_profile_enabled and not self._config.enable_cuda_graph: @@ -321,7 +315,7 @@ def _validate_args(self, mpu, replace_with_kernel_inject): if self._config.checkpoint is not None and not isinstance(self._config.checkpoint, (str, dict)): raise ValueError(f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}") - supported_dtypes = [None, torch.half, torch.int8, torch.float] + supported_dtypes = [None, torch.half, torch.int8, torch.float, torch.bfloat16] if self._config.dtype not in supported_dtypes: raise ValueError(f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}") @@ -589,7 +583,8 @@ def forward(self, *inputs, **kwargs): **kwargs: variable length keyword arguments """ start = None - if self.model_profile_enabled and get_accelerator().device_name() == 'cuda' and self._config.enable_cuda_graph: + if self.model_profile_enabled and (get_accelerator().device_name() == 'cuda' or get_accelerator().device_name() == 'hpu') and \ + self._config.enable_cuda_graph: get_accelerator().synchronize() start = time.time() @@ -634,3 +629,19 @@ def _generate(self, *inputs, **kwargs): ) return self.module.generate(*inputs, **kwargs) + + def compile(self, backend=get_accelerator().get_compile_backend(), compile_kwargs={}) -> None: + """ + Compile the module using the specified backend and kwargs. 
+ """ + if not is_compile_supported(): + raise RuntimeError("compile is not supported in your version of PyTorch.") + + if self._is_compiled: + return + self.module.compile(backend=backend, **compile_kwargs) + self._is_compiled = True + + @property + def is_compiled(self) -> bool: + return self._is_compiled diff --git a/deepspeed/launcher/runner.py b/deepspeed/launcher/runner.py index 0ca89dd0497a..4095aabe0772 100755 --- a/deepspeed/launcher/runner.py +++ b/deepspeed/launcher/runner.py @@ -31,7 +31,9 @@ from deepspeed.accelerator import get_accelerator DLTS_HOSTFILE = "/job/hostfile" -EXPORT_ENVS = ['MLFLOW', 'PYTHON', 'MV2', 'UCX'] +EXPORT_ENVS = [ + 'MLFLOW', 'NCCL', 'PYTHON', 'MV2', 'UCX', 'PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES', 'PT_HPU_LAZY_ACC_PAR_MODE' +] EXPORT_ENVS += NEBULA_EXPORT_ENVS DEEPSPEED_ENVIRONMENT_NAME = os.getenv("DS_ENV_FILE", ".deepspeed_env") DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.'] @@ -483,6 +485,16 @@ def main(args=None): if args.elastic_training: assert not args.no_local_rank, "--no_local_rank argument is not supported in Elastic training" + if get_accelerator().device_name() == 'hpu': + # TODO: SW-113485 need to remove the below WA once SW-113485 is unblocked + def update_wa_env_var(key, value): + if key not in os.environ.keys(): + env[key] = value + + update_wa_env_var("PT_HPU_LAZY_ACC_PAR_MODE", "0") + # todo SW-125782: remove DYNAMIC SHAPE disable WA + update_wa_env_var("PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES", "0") + # encode world info as base64 to make it easier to pass via command line world_info_base64 = encode_world_info(active_resources) diff --git a/deepspeed/model_implementations/transformers/ds_llama2.py b/deepspeed/model_implementations/transformers/ds_llama2.py index 7d9eb4113a8a..325bfb4f7e18 100644 --- a/deepspeed/model_implementations/transformers/ds_llama2.py +++ b/deepspeed/model_implementations/transformers/ds_llama2.py @@ -4,11 +4,8 @@ # DeepSpeed Team import torch -from deepspeed import comm as dist from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference -inference_module = None - class DeepSpeedLlama2Inference(DeepSpeedTransformerInference): """Initialize the DeepSpeed OPT Transformer Layer. 
@@ -27,18 +24,10 @@ def forward(self, *args, **kwargs): input = args[0] input_mask = None - # Allocate memory only on first layer forward - if self.config.layer_id == 0 and self._alloc_workspace: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, - self.config.bigscience_bloom, - dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, - self.config.min_out_tokens) - self._alloc_workspace = False - get_present = True + self.allocate_workspace(input.size()) + # We set the prev key/value to None when there is a prompt if input.shape[1] > 1: self.layer_past = None diff --git a/deepspeed/model_implementations/transformers/ds_transformer.py b/deepspeed/model_implementations/transformers/ds_transformer.py index d87d0de997b5..51216ca00ade 100644 --- a/deepspeed/model_implementations/transformers/ds_transformer.py +++ b/deepspeed/model_implementations/transformers/ds_transformer.py @@ -6,19 +6,18 @@ import torch import torch.nn as nn from deepspeed import comm as dist +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from deepspeed.utils.logging import log_dist from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention +from deepspeed.ops.transformer.inference.op_binding.workspace import WorkspaceOp from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder import deepspeed if deepspeed.HAS_TRITON: from deepspeed.ops.transformer.inference.triton.mlp import TritonMLP from deepspeed.ops.transformer.inference.triton.attention import TritonSelfAttention -inference_module = None - class DeepSpeedTransformerInference(nn.Module): """Initialize the DeepSpeed Transformer Layer. @@ -37,6 +36,7 @@ class DeepSpeedTransformerInference(nn.Module): for specific downstream tasks. 
""" layer_id = 0 + workspace = None def __init__(self, config, @@ -52,10 +52,6 @@ def __init__(self, DeepSpeedTransformerInference.layer_id += 1 data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype - global inference_module - if inference_module is None: - builder = InferenceBuilder() - inference_module = builder.load() if DeepSpeedTransformerInference.layer_id == 1: log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0]) @@ -87,23 +83,27 @@ def __init__(self, requires_grad=False) self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device), requires_grad=False) - self.layer_past = None - try: - if config.dtype == torch.float32: - self.allocate_workspace = inference_module.allocate_workspace_fp32 - elif config.dtype == torch.bfloat16: - self.allocate_workspace = inference_module.allocate_workspace_bf16 - else: - self.allocate_workspace = inference_module.allocate_workspace_fp32 - self._alloc_workspace = True - except AttributeError: - self.allocate_workspace = None - self._alloc_workspace = False + + self.layer_norm = LayerNormOp() + DeepSpeedTransformerInference.workspace = WorkspaceOp(self.config) + self._should_allocate_workspace = True + self.allocate_workspace_func = self.workspace.allocate_workspace + + def allocate_workspace(self, size): + # Allocate memory only on first layer forward + if self.config.layer_id == 0 and self._should_allocate_workspace: + self.allocate_workspace_func(self.config.hidden_size, self.config.heads, size[1], size[0], + DeepSpeedTransformerInference.layer_id, self.config.mp_size, + self.config.bigscience_bloom, + dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, + self.config.min_out_tokens) + self._should_allocate_workspace = False @classmethod def reset_cache(cls): - if inference_module is not None: - inference_module.reset_cache() + if cls.workspace is None: + cls.workspace = WorkspaceOp() + cls.workspace.reset_cache() def forward( self, @@ -136,23 +136,12 @@ def forward( input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask - # Allocate memory only on first layer forward - if self.config.layer_id == 0 and self._alloc_workspace: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, - self.config.bigscience_bloom, - dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, - self.config.min_out_tokens) - self._alloc_workspace = False + self.allocate_workspace(input.size()) get_present = (get_present or get_key_value or use_cache) input_mask = input_mask if attention_mask is None else attention_mask - # We set the prev key/value to None when there is a prompt - if input.shape[1] > 1: - self.layer_past = None - layer_past = layer_past if layer_past is not None else self.layer_past + layer_past = past_key_value if past_key_value is not None else layer_past head_mask = layer_head_mask if layer_head_mask is not None else head_mask attn_mask = None @@ -178,14 +167,14 @@ def forward( output_attentions, self.norm_w, self.norm_b, - alibi) + alibi, + **kwargs) presents = (key, value) - self.layer_past = presents if layer_past is None else None output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob) if not self.config.pre_layer_norm: - output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon) + output = self.layer_norm(output, 
self.norm_w, self.norm_b, self.config.epsilon) output = output.to(input_type) if get_present: diff --git a/deepspeed/module_inject/auto_tp.py b/deepspeed/module_inject/auto_tp.py index bf9c2d74c635..20436fa506a7 100644 --- a/deepspeed/module_inject/auto_tp.py +++ b/deepspeed/module_inject/auto_tp.py @@ -133,7 +133,7 @@ def is_load_module(module): load_layers = [nn.Linear, nn.Embedding, nn.LayerNorm] load_layer_names = [ "LPLayerNorm", "SharedEmbedding", "OPTLearnedPositionalEmbedding", "LlamaRMSNorm", "FalconLinear", - "MistralRMSNorm", "T5LayerNorm" + "MistralRMSNorm", "T5LayerNorm", "MixtralRMSNorm", "Qwen2RMSNorm" ] return module.__class__ in load_layers or module._get_name() in load_layer_names @@ -303,6 +303,9 @@ def tp_parser(model): elif 'self_attention.dense' in layer and 'falcon' in str( type(module)): # this is a hack to get the right linear layer for this model! gem_list = gem_list + [layer] + # Mixtral-7x8b used w2*act(w1*w3) linear. need to replace w2 to linearallreduce. + elif 'w2' in layer and 'mixtral' in str(type(module)): + gem_list = gem_list + [layer] layer_list = [] if gem_list != []: diff --git a/deepspeed/module_inject/containers/base.py b/deepspeed/module_inject/containers/base.py index 83e109167ffe..ab6286325eed 100644 --- a/deepspeed/module_inject/containers/base.py +++ b/deepspeed/module_inject/containers/base.py @@ -203,6 +203,12 @@ def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b): self._4hh_b = _4hh_b def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb): + #TODO SW-164572: remove below mark_step WA once SW-164573 is resolved. + if get_accelerator().device_name() == 'hpu': + import habana_frameworks.torch.hpu as thpu + if thpu.is_initialized(): + import habana_frameworks.torch.core as htcore + htcore.mark_step() self.attn_nw = attn_nw self.attn_nb = attn_nb self.input_nw = input_nw diff --git a/deepspeed/module_inject/containers/bloom.py b/deepspeed/module_inject/containers/bloom.py index 05f30eec8d85..c103b17e1559 100644 --- a/deepspeed/module_inject/containers/bloom.py +++ b/deepspeed/module_inject/containers/bloom.py @@ -23,6 +23,7 @@ def __init__(self, **kwargs): # All model specific things should be defined here instead of the base class. self.bigscience_bloom = True + self.triangular_masking = False def create_module(self, config=None): _config = config if config is not None else self.ds_model_config diff --git a/deepspeed/module_inject/policy.py b/deepspeed/module_inject/policy.py index 41df2b85dc0c..d9cb8c2d9eb5 100644 --- a/deepspeed/module_inject/policy.py +++ b/deepspeed/module_inject/policy.py @@ -27,7 +27,7 @@ class DSPolicy(ABC): _orig_layer_class = None def __init__(self): - self.cuda_graph_supported = False + self.cuda_graph_supported = False if get_accelerator().device_name() != 'hpu' else True @abstractmethod def attention(self): @@ -62,7 +62,7 @@ def __init__( # Type of normalization to perform norm_type=NormType.LayerNorm): super().__init__() - self.cuda_graph_supported = False + self.cuda_graph_supported = False if get_accelerator().device_name() != 'hpu' else True self.inference = inference self.linear_layer = linear_layer self.scale_attention = scale_attention diff --git a/deepspeed/module_inject/tp_shard.py b/deepspeed/module_inject/tp_shard.py index 79c19b5f1272..29e3e485a632 100644 --- a/deepspeed/module_inject/tp_shard.py +++ b/deepspeed/module_inject/tp_shard.py @@ -5,6 +5,8 @@ from deepspeed import comm as dist global num_kv_heads +# TODO: SW-184584 remove this WA. 
+is_old_shard_size = None def set_num_kv_heads(num): @@ -24,12 +26,17 @@ def get_num_kv_heads(): def get_shard_size(total_size, mp_size, name=None, rank=None): global num_kv_heads + # TODO: SW-184584 remove this WA. + global is_old_shard_size + if is_old_shard_size is None: + import os + is_old_shard_size = os.environ.get("HPU_DS_OLD_SHARD_SIZE", "1").lower() in ["true", "1"] last_linear = ["lm_head", "embed_out"] # When we have num_kv_heads defined, uneven division is possible, otherwise enforce near even division if rank == None: rank = dist.get_rank() - if num_kv_heads != None and total_size % num_kv_heads == 0 and "mlp" not in str(name) and str( - name) not in last_linear: + if num_kv_heads != None and (is_old_shard_size or (total_size % num_kv_heads == 0 and "mlp" not in str(name) + and str(name) not in last_linear)): my_slices = (num_kv_heads // mp_size) + (1 if rank < (num_kv_heads % mp_size) else 0) return total_size * my_slices // num_kv_heads else: diff --git a/deepspeed/moe/capacity_bins.py b/deepspeed/moe/capacity_bins.py new file mode 100644 index 000000000000..b225709da58f --- /dev/null +++ b/deepspeed/moe/capacity_bins.py @@ -0,0 +1,331 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math +import torch +from typing import Union +from deepspeed import comm as dist +from deepspeed.utils import groups +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size + + +class CapacityBins(torch.nn.Module): + """ CapacityBins - maps current capacity value into capacity bins. + + When using drop_tokens=false, the capacity at each iteration will differ since + we use a capacity to accommodate for the largest number of tokens sent to an expert. + This creates dynamic shapes tensors. + + The motivation for using bins is to reduce the dynamic shapes to a limited set, hence + being more friendly when running in non-eager mode (e.g., using compile). + + The minimum range of capacity is the optimal capacity where all tokens are evenly routed + among all experts. The maximum range of capacity is the worst-case capacity where all + tokens are routed to a single expert (unlikely, but a valid upper bound). + + This class maintains the current configured capacity bins. It also tracks bins usage info + which enables to dynamically update the capacity bins to optimize performance (i.e. to + minimize the number of dummy extra tokens that are routed). + + Upon initialization, if configured_bins provided, use configured_bins to initialize the bins. + Otherwise, the capacity bins are initialized to bins with exponentially growing width. + + Argument use_cpu forces capacity bins logic to be executed on the CPU (not on the accelerator). + When using torch.compile, this prevents potential graph breaks. 
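The arithmetic behind the uneven KV-head sharding above: ranks below num_kv_heads % mp_size receive one extra head. The numbers below are illustrative.

def shard_size(total_size, num_kv_heads, mp_size, rank):
    my_heads = (num_kv_heads // mp_size) + (1 if rank < (num_kv_heads % mp_size) else 0)
    return total_size * my_heads // num_kv_heads

num_kv_heads, mp_size, total_size = 8, 3, 1024   # e.g. 8 KV heads * 128 head_dim
print([shard_size(total_size, num_kv_heads, mp_size, r) for r in range(mp_size)])
# [384, 384, 256] -> 3 + 3 + 2 heads, summing back to 1024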
+ """ + + def __init__(self, + k: int, + num_experts: int, + num_capacity_bins: int, + capacity_bins_exp_base: float, + capacity_bins_alignment: int, + min_bin_size: int = 1, + configured_bins: Union[list, None] = None, + use_cpu=True) -> None: + super().__init__() + self.k = k + self.num_experts = num_experts + self.num_capacity_bins = num_capacity_bins + self.capacity_bins_exp_base = capacity_bins_exp_base + self.configured_alignment = capacity_bins_alignment + assert min_bin_size > 0, f'CapacityBins min_bin_size must be > 0, got {min_bin_size}' + self.min_bin_size = min_bin_size + if configured_bins is not None: + assert len(configured_bins) == self.num_capacity_bins, \ + f'Configured bins ({configured_bins}) does not match num capacity bins ({self.num_capacity_bins})' + assert all(bin_edge > 0 for bin_edge in configured_bins), \ + 'Configured bin edges must be > 0' + assert all(configured_bins[i] < configured_bins[i+1] for i in range(len(configured_bins)-1)), \ + 'Configured bin edges must be a strictly increasing list' + self.use_cpu = use_cpu + + # initialize usage stats + zero_bins = torch.zeros(num_capacity_bins, dtype=torch.long, device='cpu', requires_grad=False) + self.register_buffer('bins_usage', zero_bins.clone().detach()) + self.register_buffer('bins_usage_last', zero_bins.clone().detach()) + + # initialize bin edges + if configured_bins is not None: + self.register_buffer('capacity_bins', + torch.tensor(configured_bins, dtype=torch.long, device='cpu', requires_grad=False)) + else: + # we don't know the range of the capacity bins, therefore we create a zeroed tensor + # when we load from checkpoint, or during the first forward, we update the bins + # note that if the first element =0, it marks that capacity_bins is not initialized + self.register_buffer('capacity_bins', zero_bins.clone().detach()) + + # attribute self.device is the device to use for capacity bins logic, where attribute self.model_device + # is the device used by the model. attributes can be different in case use_cpu is configured. 
+ self.device = None + self.model_device = None + + self.min_tokens_per_expert = None + self.max_tokens_per_expert = None + self.alignment = None + + def set_bins(self, bins: list): + with (torch.no_grad()): + # set the new capacity bins and clear the usage stats (not relevant for new bins) + self.capacity_bins.copy_(torch.tensor(bins, dtype=torch.long, device=self.device)) + self.bins_usage.zero_() + self.bins_usage_last.zero_() + + def get_stats(self, incremental=True): + + def is_usage_data_available(usage_tensor): + with torch.no_grad(): + return usage_tensor.sum().item() > 0 + + if not is_usage_data_available(self.bins_usage): + return None + + with torch.no_grad(): + # reduce stats across all workers; for that, we need to temporarily move stats to model device + bins_usage = self.bins_usage.clone().detach().to(self.model_device) + dist.all_reduce(bins_usage, op=dist.ReduceOp.SUM, group=dist.get_world_group()) + bins_usage = bins_usage.to(self.device) + + # incremental returns only the diff from last activation of get_stats() + if incremental: + delta_bins_usage = bins_usage + if is_usage_data_available(self.bins_usage_last): + delta_bins_usage -= self.bins_usage_last + self.bins_usage_last.copy_(bins_usage) + bins_usage = delta_bins_usage + + # stats are returned using cpu tensors + bins_usage = bins_usage.to('cpu') + bins_usage_list = bins_usage.tolist() + bins_edges = self.capacity_bins.clone().detach().to('cpu') + bins_edges_list = bins_edges.tolist() + stats = { + 'min_range': self.min_tokens_per_expert, + 'max_range': self.max_tokens_per_expert, + 'alignment': self.alignment, + 'min_bin_size': self.min_bin_size if self.min_bin_size is not None else 0, + 'edges': bins_edges, + 'usage': bins_usage, + 'summary': {f'bin{i}_{bins_edges_list[i]}': bins_usage_list[i] + for i in range(len(bins_usage))} + } + return stats + + def _save_device(self, tensor): + if self.device is None: + # set self.device to requested device for capacity bins logic. also keep device used by model + assert self.model_device is None, f'Expected model_device=None on 1st forward, but got {self.model_device}' + self.model_device = tensor.device + self.device = 'cpu' if self.use_cpu else self.model_device + + # move all model's buffers to device used for capacity bins logic + self.capacity_bins = self.capacity_bins.to(self.device) + self.bins_usage = self.bins_usage.to(self.device) + self.bins_usage_last = self.bins_usage_last.to(self.device) + + def get_binned_capacity(self, gate_output, capacity, update_stats=True): + with torch.no_grad(): + # on first forward, capture device used + # then, move inputs to requested capacity bins device + self._save_device(gate_output) + gate_output, capacity = gate_output.to(self.device), capacity.to(self.device) + + # get bins; if first call, calculate bins + bins = self._get_capacity_bins(gate_output) + + # find bin to use based on current capacity and update stats + index = torch.searchsorted(bins, capacity, right=False) + index = torch.min(index, torch.tensor(len(bins) - 1, dtype=capacity.dtype, device=self.device)) + if update_stats: + self._update_stats(index) + + return bins[index].to(self.model_device) + + def _update_stats(self, index): + # currently we maintain stats for training only + if self.training: + self.bins_usage[index] += 1 + + def _generate_bins(self, force_start_bin=False): + # create exponentially growing width bins, and normalize width sum to 1.0 + # when force_start_bin=True, we force the first bin value = start range (aka start). 
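How get_binned_capacity snaps a raw capacity to a bin edge: searchsorted picks the first edge >= capacity and the index is clamped to the last bin. The values below are illustrative.

import torch

bins = torch.tensor([64, 128, 256, 512], dtype=torch.long)
for capacity in (50, 128, 300, 700):
    idx = torch.searchsorted(bins, torch.tensor(capacity), right=False)
    idx = torch.min(idx, torch.tensor(len(bins) - 1))
    print(capacity, "->", bins[idx].item())
# 50 -> 64, 128 -> 128, 300 -> 512, 700 -> 512 (over-range capacities land in the last bin)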
+ # force_start_bin=True is handled by prepending width=0 + start = self.min_tokens_per_expert + stop = self.max_tokens_per_expert + exp_base = torch.tensor(self.capacity_bins_exp_base, dtype=torch.float).to(self.device) + if force_start_bin: + bin_widths = exp_base**torch.arange(0, self.num_capacity_bins - 1, device=self.device) + bin_widths = torch.cat([torch.tensor([0.], device=bin_widths.device), bin_widths]) + else: + bin_widths = exp_base**torch.arange(0, self.num_capacity_bins, device=self.device) + normalized_bin_widths = bin_widths / torch.sum(bin_widths) + + # calculate bin edges by accumulating the bins width and scaling to [start...stop] range + # finally, align bin edges + bin_edges = torch.cumsum(normalized_bin_widths, dim=0) + bin_edges = start + (stop - start) * bin_edges + bin_edges = torch.ceil(bin_edges / self.alignment).mul(self.alignment).to(torch.long) + + # verify that we got N distinct capacity bins + assert len(set(bin_edges.tolist())) == self.num_capacity_bins, \ + f'Resulting capacity bins size != {self.num_capacity_bins}, bins={bin_edges.tolist()}' + + return bin_edges + + def _verify_configured_bins(self): + """ This method runs once (at first forward) and verifies that configured bins are valid """ + # verify configured bins range + assert self.capacity_bins[0].item() >= self.min_tokens_per_expert + assert self.capacity_bins[-1].item() >= self.max_tokens_per_expert + # verify configured bins alignment + alignment = torch.tensor(self.alignment, dtype=torch.long, device=self.device) + assert torch.remainder(self.capacity_bins, alignment).sum().item() == 0, \ + f'Invalid capacity_bins={self.capacity_bins.clone().detach().cpu().tolist()}, alignment={self.alignment} ' + + def _get_capacity_bins(self, gate_output: torch.Tensor) -> Union[torch.Tensor, None]: + """ Generates capacity bins with exponential growing width. + + During training, we encourage tokens to be evenly routed (via aux loss). + Therefore, generate bins with exponential growing bins width, i.e., bins that are + closer to the start are smaller and thus have less extra non-required capacity. + + Alignment is required when the bins have to be aligned on a specific value. + For example: + 1. Configured alignment (capacity_bins_alignment) due to e.g. hardware specific considerations + 2. When the non-experts are using TP and the experts ate not using TP, we + need to align the bins on TP boundary. 
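A standalone rendering of the bin-edge construction above: exponentially growing widths normalized onto [start, stop] and rounded up to the alignment. The parameters are examples only.

import torch

def make_bins(start, stop, num_bins, exp_base=2.0, alignment=8):
    widths = exp_base ** torch.arange(0, num_bins, dtype=torch.float)
    edges = torch.cumsum(widths / widths.sum(), dim=0)   # in (0, 1], last edge == 1.0
    edges = start + (stop - start) * edges               # scale onto the capacity range
    return torch.ceil(edges / alignment).mul(alignment).to(torch.long)

print(make_bins(start=32, stop=512, num_bins=4).tolist())  # [64, 128, 256, 512]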
+ + Args: + gate_output (torch.Tensor): router gating function output tensor + + Returns: + bins tensor (torch.Tensor dtype=torch.long) + """ + # in case of first forward, initialize information based on gate_output + if self.min_tokens_per_expert is None: + # calculate optimal and worst case (min and max) tokens per expert + n_tokens_in_micro_batch = torch.tensor(gate_output.shape[0], device=gate_output.device).to(torch.long) + n_optimal_tokens_per_expert = torch.ceil(self.k * n_tokens_in_micro_batch / self.num_experts).to( + torch.long) + self.min_tokens_per_expert = n_optimal_tokens_per_expert.item() + self.max_tokens_per_expert = n_tokens_in_micro_batch.item() + # handle bin alignment - maximum between configured alignment and TP (if used) + tp_alignment = 1 + if groups._get_expert_model_parallel_world_size() == 1 and groups.mpu is not None: + tp_alignment = bwc_tensor_model_parallel_world_size(groups.mpu) + self.alignment = max(self.configured_alignment, tp_alignment) + + # if bins configured (either configured by user or loaded from checkpoint) - verify valid bins + # otherwise, initialize bins + if self.capacity_bins[0] > 0: + self._verify_configured_bins() + else: + self.capacity_bins = self._generate_bins() + + return self.capacity_bins + + +def optimize_bins(min_range, bins: torch.Tensor, bins_usage: torch.Tensor, alignment, min_bin_size) -> list: + """ Optimize MOE capacity bins according to collected bins usage statistics + + The bins are optimized to minimize the cost of binning. + The cost of each bin is defined as the additional tokens processed in this bin. + Since we don't have the actual capacities that were mapped to each bin, we use the median of the bin. + After we calculate the cost of all bins, we iteratively try to replace the lowest and highest cost bins + with 2 bins: the original highest cost bin and the median of the highest cost bin. + This way, we keep the number of bins constant while decreasing the overall cost of binning. 
+ + For example: + Given bins [150, 200, 250, 300] with start of range=100 + And usage [100, 0, 50, 10 ] + + We first calculate the cost of each bin: + Cost: [25*100, 25*0, 25*50, 25*10] = [2500, 0, 1250, 250] + + Lowest cost bin is 200 (index=1) + Highest cost bin is 150 (index=0) + + First iteration of optimization: + Remove bin1 and split bin0 --> [125, 150, 250, 300] + """ + + def align_to(value): + return int(math.ceil(value / alignment) * alignment) + + # sort bins by their cost of usage (we want to split high cost bins) + # we assume that for each bin, the cost is 1/2 of its width * usage count + shifted_bins = torch.cat([torch.tensor([min_range], dtype=bins.dtype, device=bins.device), bins[:-1]]) + width = bins - shifted_bins + cost = bins_usage * width / 2.0 + sorted_cost = torch.argsort(cost, descending=False, stable=True).tolist() + + # sorted cost is in ascending order + # min_sort_idx is current index into sorted_cost for candidate bin to be removed + # max_sort_idx is current index into sorted_cost for candidate bin to be split + bins = bins.tolist() + n_bins = len(bins) + min_sort_idx = 0 + max_sort_idx = n_bins - 1 + new_bins = [] + while min_sort_idx <= max_sort_idx: + # if same cost, keep all remaining bins and exit + # this also handles the case of min_sort_idx == max_sort_idx + min_cost = cost[sorted_cost[min_sort_idx]] + max_cost = cost[sorted_cost[max_sort_idx]] + if min_cost == max_cost: + bin_indexes = sorted_cost[min_sort_idx:max_sort_idx + 1] + new_bins.extend([bins[idx] for idx in bin_indexes]) + break + + # last bin can't be removed + min_bin_idx = sorted_cost[min_sort_idx] + if min_bin_idx == (n_bins - 1): + new_bins.append(bins[min_bin_idx]) + min_sort_idx += 1 + continue + + # calculate the left & right bin's width of the candidate bin after we split it to 2 + # verify that both left & right will meet the min bin size requirement + max_bin_idx = sorted_cost[max_sort_idx] + max_bin_start = min_range if max_bin_idx == 0 else bins[max_bin_idx - 1] + max_bin_end = bins[max_bin_idx] + mid_point = (max_bin_start + max_bin_end) // 2 + mid_point = align_to(mid_point) + left_bin_width = mid_point - max_bin_start + right_bin_width = max_bin_end - mid_point + if left_bin_width < min_bin_size or right_bin_width < min_bin_size: + new_bins.append(bins[max_bin_idx]) + max_sort_idx -= 1 + continue + + # skip min cost bin and split max cost bin + new_bins.append(mid_point) + new_bins.append(max_bin_end) + min_sort_idx += 1 + max_sort_idx -= 1 + + # sort the bins in ascending order + bins = sorted(new_bins) + return bins diff --git a/deepspeed/moe/layer.py b/deepspeed/moe/layer.py index dfa9fcf4f464..c2459e2d6afb 100644 --- a/deepspeed/moe/layer.py +++ b/deepspeed/moe/layer.py @@ -10,8 +10,10 @@ from torch.nn import functional as F from deepspeed.utils import groups, log_dist +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size from .experts import Experts from .sharded_moe import MOELayer, TopKGate +from deepspeed.accelerator import get_accelerator class MoE(nn.Module): @@ -33,6 +35,10 @@ class MoE(nn.Module): use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed). 
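Reproducing the cost computation from the docstring example above, where a bin's cost is approximated as its usage count times half its width:

import torch

min_range = 100
bins = torch.tensor([150, 200, 250, 300])
usage = torch.tensor([100, 0, 50, 10])

prev_edges = torch.cat([torch.tensor([min_range]), bins[:-1]])
width = bins - prev_edges          # [50, 50, 50, 50]
cost = usage * width / 2.0
print(cost.tolist())               # [2500.0, 0.0, 1250.0, 250.0]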
enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts top2_2nd_expert_sampling (bool, optional): default=True, whether to perform sampling for 2nd expert + num_capacity_bins (int, optional): default=0, number of capacity bins to use in case of drop_tokens=False + capacity_bins_exp_base (float, optional): default=2.0, in case of capacity bins, exponential growing factor for bin width + capacity_bins_alignment (int, optional): default=1, in case of capacity bins, required bins alignment + configured_capacity_bins (list, optional): default=None, explicit configuration of capacity bin edges """ def __init__(self, @@ -50,7 +56,12 @@ def __init__(self, use_rts: bool = True, use_tutel: bool = False, enable_expert_tensor_parallelism: bool = False, - top2_2nd_expert_sampling: bool = True) -> None: + top2_2nd_expert_sampling: bool = True, + sequence_parallel: bool = False, + num_capacity_bins: int = 0, + capacity_bins_exp_base: float = 2.0, + capacity_bins_alignment: int = 1, + configured_capacity_bins: Optional[list] = None) -> None: super(MoE, self).__init__() @@ -61,7 +72,10 @@ def __init__(self, self.expert_group_name = f"ep_size_{self.ep_size}" self.num_experts = num_experts self.num_local_experts = num_experts // self.ep_size - + self.sequence_parallel = sequence_parallel + self.drop_tokens = drop_tokens + #TODO SW-179530: remove workaround when issue with lazy is resolved (see SW-179530). + expert.to(get_accelerator().device_name()) log_dist( f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}', [0]) @@ -70,14 +84,28 @@ def __init__(self, 'Unsupported noisy_gate_policy: ' + noisy_gate_policy experts = Experts(expert, self.num_local_experts, self.expert_group_name) - self.deepspeed_moe = MOELayer(TopKGate(hidden_size, num_experts, k, capacity_factor, eval_capacity_factor, - min_capacity, noisy_gate_policy, drop_tokens, use_rts, - top2_2nd_expert_sampling), + self.deepspeed_moe = MOELayer(TopKGate(hidden_size, + num_experts, + k, + capacity_factor, + eval_capacity_factor, + min_capacity, + noisy_gate_policy, + drop_tokens, + use_rts, + None, + top2_2nd_expert_sampling, + self.sequence_parallel, + num_capacity_bins, + capacity_bins_exp_base, + capacity_bins_alignment, + configured_bins=configured_capacity_bins), experts, self.expert_group_name, self.ep_size, self.num_local_experts, - use_tutel=use_tutel) + use_tutel=use_tutel, + sequence_parallel=self.sequence_parallel) if self.use_residual: self.mlp = expert # coefficient is used for weighted sum of the output of expert and mlp @@ -87,20 +115,31 @@ def set_deepspeed_parallelism(self, use_data_before_expert_parallel_: bool = Fal self._create_process_groups(use_data_before_expert_parallel_=use_data_before_expert_parallel_) def _create_process_groups(self, use_data_before_expert_parallel_: bool = False) -> None: + # For sequence-parallel + expert-tp + no token-dropping, create a process group with ranks of EP + TP. + # This group is required to reduce_max the local token capacity across EP + TP ranks. 
+ tp_enabled = bwc_tensor_model_parallel_world_size(groups.mpu) > 1 + expert_tp_enabled = self.enable_expert_tensor_parallelism and tp_enabled + use_ep_tp_group = self.sequence_parallel and not self.drop_tokens and expert_tp_enabled + # Create process group for a layer if needed if self.expert_group_name not in groups._get_expert_parallel_group_dict(): print(f"No existing process group found, creating a new group named: {self.expert_group_name}") - if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism): - # Condition 1 - no groups.mpu means no tensor parallelism - # Condition 2 - disabling expert tensor parallelism on purpose + if not expert_tp_enabled: + # expert tensor parallelism is disabled, use only expert parallelism and data parallelism groups._create_expert_and_data_parallel( self.ep_size, use_data_before_expert_parallel_=use_data_before_expert_parallel_) else: - # expert tensor parallelism is enabled + # expert tensor parallelism is enabled, use expert, data and tensor parallelism groups._create_expert_data_and_model_parallel( - self.ep_size, mpu=groups.mpu, use_data_before_expert_parallel_=use_data_before_expert_parallel_) + self.ep_size, + mpu=groups.mpu, + use_data_before_expert_parallel_=use_data_before_expert_parallel_, + create_expert_tensor_parallel_group=use_ep_tp_group) + # Set the group handle for the MOELayer (deepspeed_moe) object self.deepspeed_moe._set_ep_group(groups._get_expert_parallel_group(self.expert_group_name)) + if use_ep_tp_group: + self.deepspeed_moe._set_ep_tp_group(groups._get_expert_tensor_parallel_group(self.expert_group_name)) def forward(self, hidden_states: torch.Tensor, diff --git a/deepspeed/moe/mappings.py b/deepspeed/moe/mappings.py index 6c501ea6503a..b8a06274343a 100644 --- a/deepspeed/moe/mappings.py +++ b/deepspeed/moe/mappings.py @@ -23,6 +23,8 @@ import torch import deepspeed +from deepspeed.utils.bwc import (bwc_tensor_model_parallel_world_size, bwc_tensor_model_parallel_rank, + bwc_tensor_model_parallel_group) def _gather_tokens(input_, dim=0): @@ -31,11 +33,11 @@ def _gather_tokens(input_, dim=0): input_ = input_.contiguous() # Size and dimension. - rank = mpu.get_tensor_model_parallel_rank() + rank = bwc_tensor_model_parallel_rank(mpu) - tensor_list = [torch.empty_like(input_) for _ in range(mpu.get_tensor_model_parallel_world_size())] + tensor_list = [torch.empty_like(input_) for _ in range(bwc_tensor_model_parallel_world_size(mpu))] tensor_list[rank] = input_ - deepspeed.comm.all_gather(tensor_list, input_, group=mpu.get_tensor_model_parallel_group()) + deepspeed.comm.all_gather(tensor_list, input_, group=bwc_tensor_model_parallel_group(mpu)) # Note: torch.cat already creates a contiguous tensor. 
output = torch.cat(tensor_list, dim=dim).contiguous() @@ -47,8 +49,8 @@ def _drop_tokens(input_, dim=0): """Divide a tensor among the tensor parallel ranks""" mpu = deepspeed.utils.groups.mpu - total_chunks = mpu.get_tensor_model_parallel_world_size() - this_chunk = mpu.get_tensor_model_parallel_rank() + total_chunks = bwc_tensor_model_parallel_world_size(mpu) + this_chunk = bwc_tensor_model_parallel_rank(mpu) assert input_.shape[ dim] % total_chunks == 0, f"input dimension {dim} ({input_.shape[dim]}) is not divisible by tensor parallel world size ({total_chunks})" chunk_size = input_.shape[dim] // total_chunks @@ -92,7 +94,7 @@ def backward(ctx, input_): def gather_tokens(input_, dim=0): mpu = deepspeed.utils.groups.mpu - if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1: + if mpu is None or bwc_tensor_model_parallel_world_size(mpu) == 1: # no tensor parallelism for non-experts return input_ return _GatherTokens.apply(input_, dim) @@ -100,7 +102,7 @@ def gather_tokens(input_, dim=0): def drop_tokens(input_, dim=0): mpu = deepspeed.utils.groups.mpu - if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1: + if mpu is None or bwc_tensor_model_parallel_world_size(mpu) == 1: # no tensor parallelism for non-experts return input_ return _DropTokens.apply(input_, dim) diff --git a/deepspeed/moe/sharded_moe.py b/deepspeed/moe/sharded_moe.py index d6c023ec11d3..8285215ad9c3 100644 --- a/deepspeed/moe/sharded_moe.py +++ b/deepspeed/moe/sharded_moe.py @@ -17,7 +17,8 @@ from deepspeed.utils.timer import SynchronizedWallClockTimer from deepspeed.utils import logger -from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size +from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple, Union import torch from torch import Tensor @@ -25,6 +26,7 @@ import torch.nn.functional as F from deepspeed.utils import groups from .mappings import drop_tokens, gather_tokens +from .capacity_bins import CapacityBins if TYPE_CHECKING: Base = Module[Tensor] @@ -177,6 +179,31 @@ def _one_hot_to_float(x, num_classes): return F.one_hot(x, num_classes=num_classes).float() +def _get_no_drop_tokens_capacity(capacity: Tensor, ep_group, ep_tp_group) -> Tensor: + # Communicate across expert processes to pick the maximum capacity. + # If sequence parallel and experts-tp is enabled, the capacity of all tp ranks must also be equal + # (required since in MLP layer, we all_gather the tokens across tp) + group = ep_tp_group if ep_tp_group is not None else ep_group + dist.all_reduce(capacity, op=dist.ReduceOp.MAX, group=group) + + if groups._get_expert_model_parallel_world_size() == 1: + # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. + # This is since we are going to activate drop_tokens() to drop duplicate tokens. 
+ tp = 1 if groups.mpu is None else bwc_tensor_model_parallel_world_size(mpu=groups.mpu) + capacity = torch.ceil(capacity / tp).mul(tp).to(capacity.dtype) + return capacity + + +def _calculate_expert_weight(gates: Tensor, mask: Tensor, locations: Tensor, capacity: Tensor) -> Tensor: + gates = einsum("s,se->se", gates, mask) + locations = _one_hot_to_float(locations, capacity) + return einsum("se,sc->sec", gates, locations) + + +def align_values(t, alignment): + return torch.ceil(t / alignment).mul(alignment).to(torch.long) + + def top1gating(logits: Tensor, capacity_factor: float, min_capacity: int, @@ -184,7 +211,10 @@ def top1gating(logits: Tensor, noisy_gate_policy: Optional[str] = None, drop_tokens: bool = True, use_rts: bool = True, - use_tutel: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + ep_group: Union[torch.distributed.ProcessGroup, None] = None, + use_tutel: bool = False, + ep_tp_group: Union[torch.distributed.ProcessGroup, None] = None, + capacity_bins: Union[CapacityBins, None] = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Implements Top1Gating on logits.""" if noisy_gate_policy == 'RSample': logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) @@ -204,18 +234,16 @@ def top1gating(logits: Tensor, mask1 = einsum("s,se->se", used_token, mask1) # gating decisions - exp_counts = torch.sum(mask1, dim=0).detach().to('cpu') + exp_counts = torch.sum(mask1, dim=0) # if we don't want to drop any tokens if not drop_tokens: - new_capacity = torch.max(exp_counts).to(logits.device) - dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.get_world_group()) - if groups._get_expert_model_parallel_world_size() == 1: - # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. - # This is since we are going to activate drop_tokens() to drop duplicate tokens. 
- tp = 1 if groups.mpu is None else groups.mpu.get_tensor_model_parallel_world_size() - new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) - capacity = new_capacity + capacity = torch.max(exp_counts) + capacity = _get_no_drop_tokens_capacity(capacity, ep_group, ep_tp_group) + if capacity_bins is not None: + capacity = capacity_bins.get_binned_capacity(gate_output=logits, capacity=capacity) + + exp_counts = exp_counts.detach().to('cpu') # Compute l_aux me = torch.mean(gates, dim=0) @@ -284,7 +312,10 @@ def top2gating(logits: Tensor, capacity_factor: float, min_capacity: int, drop_tokens: bool = True, - top2_2nd_expert_sampling: bool = True) -> Tuple[Tensor, Tensor, Tensor, Tensor]: + ep_group: Union[torch.distributed.ProcessGroup, None] = None, + top2_2nd_expert_sampling: bool = True, + ep_tp_group: Union[torch.distributed.ProcessGroup, None] = None, + capacity_bins: Union[CapacityBins, None] = None) -> Tuple[Tensor, Tensor, Tensor, Tensor]: """Implements Top2Gating on logits.""" # everything is in fp32 in this function gates = F.softmax(logits, dim=1) @@ -293,11 +324,10 @@ def top2gating(logits: Tensor, indices1_s = torch.argmax(gates, dim=1) num_experts = int(gates.shape[1]) mask1 = F.one_hot(indices1_s, num_classes=num_experts) - if top2_2nd_expert_sampling: # Create a mask for 2nd's expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ - logits += gumbel_rsample(logits.shape, device=logits.device) + logits = logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min value logits_except1 = logits.masked_fill(mask1.bool(), float("-inf")) @@ -325,14 +355,10 @@ def top2gating(logits: Tensor, mask2 *= torch.lt(locations2, capacity) else: # Do not drop tokens - set capacity according to current expert assignments - new_capacity = torch.max(exp_counts) - dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.get_world_group()) - if groups._get_expert_model_parallel_world_size() == 1: - # If the non-expert is tensor-parallel, we need to pad the capacity to 'tp'. - # This is since we are going to activate drop_tokens() to drop duplicate tokens. 
- tp = 1 if groups.mpu is None else groups.mpu.get_tensor_model_parallel_world_size() - new_capacity = torch.ceil(new_capacity / tp).mul(tp).to(new_capacity.dtype) - capacity = new_capacity + capacity = torch.max(exp_counts) + capacity = _get_no_drop_tokens_capacity(capacity, ep_group, ep_tp_group) + if capacity_bins is not None: + capacity = capacity_bins.get_binned_capacity(gate_output=logits, capacity=capacity) # Store the capacity location for each token locations1_s = torch.sum(locations1 * mask1, dim=1) @@ -350,13 +376,8 @@ def top2gating(logits: Tensor, gates2_s /= denom_s # Calculate combine_weights and dispatch_mask - gates1 = einsum("s,se->se", gates1_s, mask1_float) - gates2 = einsum("s,se->se", gates2_s, mask2_float) - locations1_sc = _one_hot_to_float(locations1_s, capacity) - locations2_sc = _one_hot_to_float(locations2_s, capacity) - combine1_sec = einsum("se,sc->sec", gates1, locations1_sc) - combine2_sec = einsum("se,sc->sec", gates2, locations2_sc) - combine_weights = combine1_sec + combine2_sec + combine_weights = _calculate_expert_weight(gates1_s, mask1_float, locations1_s, capacity) + combine_weights += _calculate_expert_weight(gates2_s, mask2_float, locations2_s, capacity) dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask, exp_counts.detach().to('cpu') @@ -374,7 +395,7 @@ class TopKGate(Module): Args: model_dim (int): size of model embedding dimension - num_experts (ints): + num_experts (int): number of experts in model """ @@ -390,13 +411,22 @@ def __init__(self, noisy_gate_policy: Optional[str] = None, drop_tokens: bool = True, use_rts: bool = True, - top2_2nd_expert_sampling: bool = True) -> None: + ep_group: Union[torch.distributed.ProcessGroup, None] = None, + top2_2nd_expert_sampling: bool = True, + sequence_parallel: bool = False, + num_capacity_bins: int = 0, + capacity_bins_exp_base: float = 2.0, + capacity_bins_alignment: int = 1, + configured_bins: Union[list, None] = None) -> None: super().__init__() # Only top-1 and top-2 are supported at the moment. 
if k != 1 and k != 2: raise ValueError('Only top-1 and top-2 gatings are supported.') self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) + self.ep_group = ep_group + self.ep_tp_group = None + self.num_experts = num_experts self.k = k self.capacity_factor = capacity_factor self.eval_capacity_factor = eval_capacity_factor @@ -408,6 +438,27 @@ def __init__(self, self.drop_tokens = drop_tokens self.use_rts = use_rts self.top2_2nd_expert_sampling = top2_2nd_expert_sampling + self.sequence_parallel = sequence_parallel + if self.sequence_parallel: + setattr(self.wg.weight, 'sequence_parallel', True) + self.capacity_bins = None + if not self.drop_tokens and num_capacity_bins > 0: + assert capacity_bins_exp_base > 1.0, \ + f'capacity_bins_exp_base must be > 1.0, but got {capacity_bins_exp_base}' + self.capacity_bins = CapacityBins(k, + num_experts, + num_capacity_bins, + capacity_bins_exp_base, + capacity_bins_alignment, + configured_bins=configured_bins) + + def _set_ep_group(self, ep_group): + assert self.ep_group is None, f'Attempting to override an existing ep_group' + self.ep_group = ep_group + + def _set_ep_tp_group(self, ep_tp_group): + assert self.ep_tp_group is None, f'Attempting to override an existing ep_tp_group' + self.ep_tp_group = ep_tp_group def forward(self, input: torch.Tensor, @@ -426,11 +477,13 @@ def forward(self, if self.k == 1: gate_output = top1gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, self.min_capacity, used_token, self.noisy_gate_policy if self.training else None, - self.drop_tokens, self.use_rts, use_tutel) + self.drop_tokens, self.use_rts, self.ep_group, use_tutel, self.ep_tp_group, + self.capacity_bins) else: gate_output = top2gating(logits, self.capacity_factor if self.training else self.eval_capacity_factor, - self.min_capacity, self.drop_tokens, self.top2_2nd_expert_sampling) + self.min_capacity, self.drop_tokens, self.ep_group, self.top2_2nd_expert_sampling, + self.ep_tp_group, self.capacity_bins) if self.wall_clock_breakdown: self.timers(TOPK_GATE_TIMER).stop() @@ -438,6 +491,33 @@ def forward(self, return gate_output + def get_stats(self, incremental=True): + if self.capacity_bins is not None: + capacity_stats = self.capacity_bins.get_stats(incremental) + if capacity_stats is not None: + return {'capacity_bins': capacity_stats} + return None + + def has_capacity_bins(self): + return self.capacity_bins is not None + + def get_capacity_bins(self): + return self.capacity_bins + + +class AuxLossForSequenceParallel(torch.autograd.Function): + """ For sequence parallel, we need to divide the auxiliary loss gradient by TP. """ + + @staticmethod + def forward(ctx, aux_loss): + return aux_loss.clone() + + @staticmethod + def backward(ctx, grad_output): + grad_input = grad_output.clone() + grad_input *= (1.0 / bwc_tensor_model_parallel_world_size(mpu=groups.mpu)) + return grad_input + class MOELayer(Base): """MOELayer module which implements MixtureOfExperts as described in Gshard_. 
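The AuxLossForSequenceParallel autograd function added above is an identity in the forward pass and only rescales the incoming gradient by 1/TP in the backward pass. A minimal self-contained sketch of the same pattern, illustrative only, with a hard-coded tp_world_size standing in for bwc_tensor_model_parallel_world_size(mpu=groups.mpu):

import torch

class _ScaleGradOnly(torch.autograd.Function):
    """Identity in forward; scales the gradient by a constant factor in backward."""

    @staticmethod
    def forward(ctx, x, scale):
        ctx.scale = scale
        return x.clone()

    @staticmethod
    def backward(ctx, grad_output):
        # No gradient is returned for `scale`, which is a plain Python float.
        return grad_output * ctx.scale, None

tp_world_size = 4  # stand-in for the tensor-parallel world size
aux_loss = torch.tensor(2.0, requires_grad=True)
scaled = _ScaleGradOnly.apply(aux_loss, 1.0 / tp_world_size)
scaled.backward()
print(scaled.item(), aux_loss.grad.item())  # 2.0 0.25 -> value unchanged, gradient divided by TP

This matches how MOELayer.forward (in the next hunk) wraps l_aux only when sequence parallelism is enabled together with expert model parallelism, presumably so the aux-loss gradient is not over-counted when gradients are later reduced across TP ranks.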
@@ -463,7 +543,8 @@ def __init__(self, ep_group_name, ep_size, num_local_experts: int, - use_tutel: bool = False) -> None: + use_tutel: bool = False, + sequence_parallel: bool = False) -> None: super().__init__() self.gate = gate self.experts = experts @@ -476,6 +557,8 @@ def __init__(self, self.time_moe = 0.0 self.timers = SynchronizedWallClockTimer() self.wall_clock_breakdown = False + self.sequence_parallel = sequence_parallel + self.l_aux = None self.use_tutel = use_tutel and TUTEL_INSTALLED and gate.k == 1 @@ -490,6 +573,11 @@ def __init__(self, def _set_ep_group(self, ep_group): self.ep_group = ep_group + self.gate._set_ep_group(ep_group) + + def _set_ep_tp_group(self, ep_tp_group): + self.ep_tp_group = ep_tp_group + self.gate._set_ep_tp_group(ep_tp_group) def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: @@ -504,6 +592,8 @@ def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: # group_size = kwargs['group_size'] if 'group_size' in kwargs.keys() else 1 reshaped_input = input[0].reshape(-1, d_model) + expert_model_parallel_world_size = groups._get_expert_model_parallel_world_size() + if self.use_tutel: self.l_aux, C, E, indices_, locations_, gates_, self.exp_counts = self.gate(reshaped_input, input[1], True) S, M = reshaped_input.size(0), reshaped_input.size(1) @@ -513,13 +603,16 @@ def forward(self, *input: Tensor, **kwargs: Any) -> Tensor: self._tutel_dispatcher.update(indices_, locations_, gates_, capacity=C) dispatched_input = self._tutel_dispatcher.encode(reshaped_input) else: - self.l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1]) + l_aux, combine_weights, dispatch_mask, self.exp_counts = self.gate(reshaped_input, input[1]) dispatched_input = einsum("sec,sm->ecm", dispatch_mask.type_as(input[0]), reshaped_input) + if self.sequence_parallel and expert_model_parallel_world_size > 1: + l_aux = AuxLossForSequenceParallel.apply(l_aux) + self.l_aux = l_aux if self.wall_clock_breakdown: self.timers(FIRST_ALLTOALL_TIMER).start() - if groups._get_expert_model_parallel_world_size() == 1: + if expert_model_parallel_world_size == 1: # If the non-expert is tensor-parallel, it will create # duplicate tokens on the tensor-parallel ranks. # Since our experts are not tensor-parallel, these duplicates diff --git a/deepspeed/moe/utils.py b/deepspeed/moe/utils.py index 8e1faffc3541..f52fe2e3442d 100644 --- a/deepspeed/moe/utils.py +++ b/deepspeed/moe/utils.py @@ -146,3 +146,7 @@ def split_params_into_different_moe_groups_for_optimizer( param_groups.append(param_group) return param_groups + + +def is_moe_param_group(param_group): + return param_group.get('moe', False) diff --git a/deepspeed/ops/adagrad/cpu_adagrad.py b/deepspeed/ops/adagrad/cpu_adagrad.py index c356a52777f2..dbde6d95f652 100755 --- a/deepspeed/ops/adagrad/cpu_adagrad.py +++ b/deepspeed/ops/adagrad/cpu_adagrad.py @@ -34,7 +34,7 @@ def __setstate__(self, state): group.setdefault('amsgrad', False) @torch.no_grad() - def step(self, closure=None, fp16_param_groups=None): + def step(self, closure=None): """Update the model parameters. .. note:: @@ -46,8 +46,6 @@ def step(self, closure=None, fp16_param_groups=None): Args: closure (callable, optional): closure to compute the loss. Defaults to ``None``. - fp16_param_groups: FP16 GPU parameters to update. Performing the - copy here reduces communication time. Defaults to ``None``. Returns: loss: if ``closure`` is provided. Otherwise ``None``. 
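With fp16_param_groups removed, step() follows the plain PyTorch optimizer contract: an optional closure that re-evaluates and returns the loss. A hedged usage sketch of that contract, using torch.optim.SGD as a stand-in so it runs without the DeepSpeed C++ op; the call pattern is assumed to be the same for the CPU Adagrad/Adam/Lion optimizers after this change:

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # stand-in optimizer
inputs, targets = torch.randn(8, 4), torch.randn(8, 1)

def closure():
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    loss.backward()
    return loss

loss = optimizer.step(closure)  # returns the loss because a closure was provided
optimizer.step()                # returns None; there is no fp16_param_groups argument anymore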
@@ -94,16 +92,7 @@ def step(self, closure=None, fp16_param_groups=None): sparse_exp_avg_sq.values()) p[sparse_param.indices()] = sparse_param.values() state['exp_avg_sq'][sparse_exp_avg_sq.indices()] = sparse_exp_avg_sq.values() - if fp16_param_groups is not None: - fp16_param_groups[group_id][param_id][sparse_param.indices()] = sparse_param.values() else: - if fp16_param_groups is not None: - self.ds_opt_adagrad.adagrad_update_copy(self.opt_id, state['step'], group['lr'], group['eps'], - group['weight_decay'], p.data, p.grad.data, - state['exp_avg_sq'], - fp16_param_groups[group_id][param_id].data) - else: - self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'], - group['weight_decay'], p.data, p.grad.data, - state['exp_avg_sq']) + self.ds_opt_adagrad.adagrad_update(self.opt_id, state['step'], group['lr'], group['eps'], + group['weight_decay'], p.data, p.grad.data, state['exp_avg_sq']) return loss diff --git a/deepspeed/ops/adam/cpu_adam.py b/deepspeed/ops/adam/cpu_adam.py index 10b8c15f970b..e0a72a494257 100755 --- a/deepspeed/ops/adam/cpu_adam.py +++ b/deepspeed/ops/adam/cpu_adam.py @@ -107,7 +107,7 @@ def __setstate__(self, state): group.setdefault('amsgrad', False) @torch.no_grad() - def step(self, closure=None, fp16_param_groups=None): + def step(self, closure=None): """Update the model parameters. .. note:: @@ -119,8 +119,6 @@ def step(self, closure=None, fp16_param_groups=None): Args: closure (callable, optional): closure to compute the loss. Defaults to ``None``. - fp16_param_groups: FP16 GPU parameters to update. Performing the - copy here reduces communication time. Defaults to ``None``. Returns: loss: if ``closure`` is provided. Otherwise ``None``. @@ -134,13 +132,6 @@ def step(self, closure=None, fp16_param_groups=None): # intended device for step device = torch.device('cpu') - # converting the fp16 params to a group of parameter - if type(fp16_param_groups) is list: - if type(fp16_param_groups[0]) is not list: - fp16_param_groups = [fp16_param_groups] - elif fp16_param_groups is not None: - fp16_param_groups = [[fp16_param_groups]] - for group_id, group in enumerate(self.param_groups): for param_id, p in enumerate(group['params']): @@ -169,13 +160,7 @@ def step(self, closure=None, fp16_param_groups=None): state['step'] += 1 beta1, beta2 = group['betas'] - if fp16_param_groups is not None: - self.ds_opt_adam.adam_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2, - group['eps'], group['weight_decay'], group['bias_correction'], - p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'], - fp16_param_groups[group_id][param_id].data) - else: - self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'], - group['weight_decay'], group['bias_correction'], p.data, p.grad.data, - state['exp_avg'], state['exp_avg_sq']) + self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'], + group['weight_decay'], group['bias_correction'], p.data, p.grad.data, + state['exp_avg'], state['exp_avg_sq']) return loss diff --git a/deepspeed/ops/lion/cpu_lion.py b/deepspeed/ops/lion/cpu_lion.py index a91a00643873..03342a3fcd34 100755 --- a/deepspeed/ops/lion/cpu_lion.py +++ b/deepspeed/ops/lion/cpu_lion.py @@ -69,7 +69,7 @@ def __setstate__(self, state): group.setdefault('amsgrad', False) @torch.no_grad() - def step(self, closure=None, fp16_param_groups=None): + def step(self, closure=None): """Update the model parameters. .. 
note:: @@ -81,8 +81,6 @@ def step(self, closure=None, fp16_param_groups=None): Args: closure (callable, optional): closure to compute the loss. Defaults to ``None``. - fp16_param_groups: FP16 GPU parameters to update. Performing the - copy here reduces communication time. Defaults to ``None``. Returns: loss: if ``closure`` is provided. Otherwise ``None``. @@ -96,13 +94,6 @@ def step(self, closure=None, fp16_param_groups=None): # intended device for step device = torch.device('cpu') - # converting the fp16 params to a group of parameter - if type(fp16_param_groups) is list: - if type(fp16_param_groups[0]) is not list: - fp16_param_groups = [fp16_param_groups] - elif fp16_param_groups is not None: - fp16_param_groups = [[fp16_param_groups]] - for group_id, group in enumerate(self.param_groups): for param_id, p in enumerate(group['params']): @@ -131,11 +122,6 @@ def step(self, closure=None, fp16_param_groups=None): state['step'] += 1 beta1, beta2 = group['betas'] - if fp16_param_groups is not None: - self.ds_opt_lion.lion_update_copy(self.opt_id, state['step'], group['lr'], beta1, beta2, - group['weight_decay'], p.data, p.grad.data, state['exp_avg'], - fp16_param_groups[group_id][param_id].data) - else: - self.ds_opt_lion.lion_update(self.opt_id, state['step'], group['lr'], beta1, beta2, - group['weight_decay'], p.data, p.grad.data, state['exp_avg']) + self.ds_opt_lion.lion_update(self.opt_id, state['step'], group['lr'], beta1, beta2, + group['weight_decay'], p.data, p.grad.data, state['exp_avg']) return loss diff --git a/deepspeed/ops/transformer/inference/config.py b/deepspeed/ops/transformer/inference/config.py index d5aff4f541f7..f21bbc82cb52 100644 --- a/deepspeed/ops/transformer/inference/config.py +++ b/deepspeed/ops/transformer/inference/config.py @@ -101,7 +101,6 @@ def __init__(self, self.return_tuple = return_tuple self.mlp_after_attn = mlp_after_attn self.mlp_act_func_type = mlp_act_func_type - self.specialized_mode = False self.training_mp_size = training_mp_size self.bigscience_bloom = bigscience_bloom self.max_out_tokens = max_out_tokens diff --git a/deepspeed/ops/transformer/inference/diffusers_attention.py b/deepspeed/ops/transformer/inference/diffusers_attention.py index 5efc560db75e..c384ee77c03d 100644 --- a/deepspeed/ops/transformer/inference/diffusers_attention.py +++ b/deepspeed/ops/transformer/inference/diffusers_attention.py @@ -10,10 +10,11 @@ from packaging import version as pkg_version from deepspeed.utils.logging import log_dist from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.workspace import WorkspaceOp +from deepspeed.ops.transformer.inference.op_binding.softmax_context import SoftmaxContextOp +from deepspeed.ops.transformer.inference.op_binding import LinearOp +from deepspeed.ops.transformer.inference.op_binding.pad_transform import PadTransformOp -# Cuda modules will be imported if needed -inference_module = None minus_inf = -10000.0 triton_flash_attn = None @@ -36,7 +37,8 @@ class DeepSpeedDiffusersAttentionFunction(Function): @staticmethod def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb, num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob, - do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel, rope_theta): + do_out_bias, score_context_func, linear_func, pad_transform_func, triton_flash_attn_kernel, + rope_theta): def 
_transpose_for_context(x): x = x.permute(0, 2, 1, 3) @@ -77,7 +79,7 @@ def selfAttention_fp(input, context, input_mask): query = query.contiguous() key = key.contiguous() value = value.contiguous() - query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn) + query, key, value = pad_transform_func(query, key, value, config.heads, do_flash_attn) attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1) context_layer = _transpose_for_context(torch.matmul(attention_scores, value)) @@ -117,10 +119,6 @@ def __init__( data_type = self.config.dtype data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype - global inference_module - if inference_module is None: - builder = InferenceBuilder() - inference_module = builder.load() if DeepSpeedDiffusersAttention.layer_id == 1: log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0]) @@ -171,26 +169,24 @@ def __init__( self.norm_factor *= math.sqrt(self.config.layer_id + 1) # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191 - if self.config.dtype in [torch.float16, torch.int8]: - self.score_context_func = inference_module.softmax_context_fp16 - self.linear_func = inference_module.linear_layer_fp16 - self.allocate_workspace = inference_module.allocate_workspace_fp16 - else: - self.score_context_func = inference_module.softmax_context_fp32 - self.linear_func = inference_module.linear_layer_fp32 - self.allocate_workspace = inference_module.allocate_workspace_fp32 + self.allocate_workspace_func = WorkspaceOp(self.config).allocate_workspace + self.score_context_func = SoftmaxContextOp(self.config) + self.linear_func = LinearOp(self.config) + self.pad_transform_func = PadTransformOp(self.config) - def forward(self, input, context=None, input_mask=None): + def allocate_workspace(self, size): + # Allocate memory only on first layer forward if self.config.layer_id == 0: - self.allocate_workspace(self.config.hidden_size, self.config.heads, - input.size()[1], - input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False, - 0, self.config.max_out_tokens, self.config.min_out_tokens) - output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw, - self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb, - self.num_attention_heads_per_partition, self.norm_factor, - self.hidden_size_per_partition, self.attn_ow, self.attn_ob, - self.do_out_bias, self.score_context_func, self.linear_func, - self.triton_flash_attn_kernel, self.config.rope_theta) + self.allocate_workspace_func(self.config.hidden_size, self.config.heads, size[1], size[0], + DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False, 0, + self.config.max_out_tokens, self.config.min_out_tokens) + + def forward(self, input, context=None, input_mask=None): + self.allocate_workspace(input.size()) + output = DeepSpeedDiffusersAttentionFunction.apply( + input, context, input_mask, self.config, self.attn_qkvw, self.attn_qw, self.attn_kw, self.attn_vw, + self.attn_qkvb, self.num_attention_heads_per_partition, self.norm_factor, self.hidden_size_per_partition, + self.attn_ow, self.attn_ob, self.do_out_bias, self.score_context_func, self.linear_func, + self.pad_transform_func, self.triton_flash_attn_kernel, self.config.rope_theta) return output diff --git a/deepspeed/ops/transformer/inference/diffusers_transformer_block.py 
b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py index b0156f905a06..d01638f36e40 100644 --- a/deepspeed/ops/transformer/inference/diffusers_transformer_block.py +++ b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py @@ -10,26 +10,9 @@ from .diffusers_attention import DeepSpeedDiffusersAttention from .bias_add import nhwc_bias_add from .diffusers_2d_transformer import Diffusers2DTransformerConfig -from deepspeed.ops.op_builder import InferenceBuilder, SpatialInferenceBuilder from deepspeed.utils.types import ActivationFuncType - -# Ops will be loaded on demand -transformer_cuda_module = None -spatial_cuda_module = None - - -def load_transformer_module(): - global transformer_cuda_module - if transformer_cuda_module is None: - transformer_cuda_module = InferenceBuilder().load() - return transformer_cuda_module - - -def load_spatial_module(): - global spatial_cuda_module - if spatial_cuda_module is None: - spatial_cuda_module = SpatialInferenceBuilder().load() - return spatial_cuda_module +from .op_binding.gated_activation import GatedActivationOp +from .op_binding.layer_norm import LayerNormOp class DeepSpeedDiffusersTransformerBlock(nn.Module): @@ -76,8 +59,8 @@ def __init__(self, equivalent_module: nn.Module, config: Diffusers2DTransformerC else: self.attn_2_bias = nn.Paramaeter(torch.zeros_like(self.norm3_g), requires_grad=False) - self.transformer_cuda_module = load_transformer_module() - load_spatial_module() + self.gated_activation = GatedActivationOp() + self.layer_norm = LayerNormOp() def forward(self, hidden_states, context=None, timestep=None, **kwargs): # In v0.12.0 of diffuser, several new kwargs were added. Capturing @@ -88,17 +71,17 @@ def forward(self, hidden_states, context=None, timestep=None, **kwargs): if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] is not None: context = kwargs["encoder_hidden_states"] - out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps) + out_norm_1 = self.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps) out_attn_1 = self.attn_1(out_norm_1) - out_norm_2, out_attn_1 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res( + out_norm_2, out_attn_1 = self.layer_norm.layer_norm_residual_store_pre_ln_res( out_attn_1, self.attn_1_bias, hidden_states, self.norm2_g, self.norm2_b, self.norm2_eps) out_attn_2 = self.attn_2(out_norm_2, context=context) - out_norm_3, out_attn_2 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res( + out_norm_3, out_attn_2 = self.layer_norm.layer_norm_residual_store_pre_ln_res( out_attn_2, self.attn_2_bias, out_attn_1, self.norm3_g, self.norm3_b, self.norm3_eps) out_ff1 = nn.functional.linear(out_norm_3, self.ff1_w) - out_geglu = self.transformer_cuda_module.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU) + out_geglu = self.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU) out_ff2 = nn.functional.linear(out_geglu, self.ff2_w) return nhwc_bias_add(out_ff2, self.ff2_b, other=out_attn_2) diff --git a/deepspeed/ops/transformer/inference/ds_attention.py b/deepspeed/ops/transformer/inference/ds_attention.py index eb6ce2f75c69..ddfb8ed8e8f9 100644 --- a/deepspeed/ops/transformer/inference/ds_attention.py +++ b/deepspeed/ops/transformer/inference/ds_attention.py @@ -89,11 +89,11 @@ def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count torch.empty(self.hidden_size_per_partition * 3, dtype=data_type_fp, 
device=device) ] - def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): qkv_out = qkv_out[0] - no_masking = input_mask is None + no_masking = input_mask is None or input_mask is False if no_masking: input_mask = torch.empty(1) @@ -108,7 +108,10 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): no_masking=no_masking, layer_id=self.config.layer_id, num_layers=DeepSpeedSelfAttention.num_layers, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) context_layer, key_layer, value_layer = attn_key_value return context_layer, key_layer, value_layer @@ -136,7 +139,8 @@ def forward(self, output_attentions=False, norm_w=None, norm_b=None, - alibi=None): + alibi=None, + **kwargs): if self.attn_qkvw is None: self._attn_qkvw, self._attn_qkvb = self._merge_qkv() else: @@ -157,10 +161,17 @@ def forward(self, gamma=norm_w, beta=norm_b) + is_prompt = kwargs.get("first_token", qkv_out[0].shape[1] > 1) + token_idx = kwargs.get("token_idx", None) + position_ids = kwargs.get("position_ids", None) + context_layer, key_layer, value_layer = self.compute_attention(qkv_out=qkv_out, input_mask=input_mask, layer_past=layer_past, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow) inp_norm = qkv_out[-1] @@ -210,7 +221,7 @@ def _split_tensor_along_last_dim(self, tensor, num_partitions, contiguous_split_ return tensor_list - def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list) or isinstance(qkv_out, tuple): qkv_out = qkv_out[0] @@ -246,8 +257,10 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): matmul_result = torch.matmul(query_layer, key_layer) # change view to [batch_size, num_heads, q_length, k_length] attention_scores = matmul_result.view(output_size[0], output_size[1], output_size[2], -1) - - offset = dist.get_rank() * self.num_attention_heads_per_partition if dist.is_initialized() else 0 + if self.config.mp_size > 1 and dist.is_initialized(): + offset = dist.get_rank() * self.num_attention_heads_per_partition + else: + offset = 0 target_dtype = torch.float16 if self.config.dtype == torch.int8 else self.config.dtype # When using the hybrid engine with BLOOM, input_mask needs to be converted from torch.bool -> torch.int64 @@ -255,7 +268,7 @@ def compute_attention(self, qkv_out, input_mask, layer_past, alibi): input_mask = input_mask.long() attention_probs = self.softmax_func(attn_scores=attention_scores, - attn_mask=((1 - input_mask).to(target_dtype) * minus_inf), + attn_mask=input_mask.to(target_dtype) * minus_inf, alibi=alibi, triangular=(self.config.triangular_masking and (attention_scores.shape[-2] > 1)), diff --git a/deepspeed/ops/transformer/inference/moe_inference.py b/deepspeed/ops/transformer/inference/moe_inference.py index 90bfcae81bf2..da3981c13714 100644 --- a/deepspeed/ops/transformer/inference/moe_inference.py +++ b/deepspeed/ops/transformer/inference/moe_inference.py @@ -7,16 +7,16 @@ import math import torch from torch.autograd import Function -# accelerator modules will be imported if needed -inference_module = None -specialized_mode = None import 
torch.nn as nn from .ds_attention import DeepSpeedSelfAttention from .config import DeepSpeedInferenceConfig +from .op_binding import SoftmaxOp, VectorMatMulOp, GELUGemmOp +from .op_binding.bias_residual import BiasResidualOp +from .op_binding.einsum_sec_sm_ecm import EinsumSecSmEcmOp +from .op_binding.layer_norm import LayerNormOp from ....moe.sharded_moe import TopKGate from deepspeed import comm as dist -from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import InferenceBuilder +from .op_binding.moe_res_matmul import MoEResMatmulOp class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig): @@ -110,16 +110,13 @@ class DeepSpeedMLPFunction(Function): @staticmethod def forward(ctx, input, inter_w, inter_b, config, output_b, output_w, q_scales, q_groups, merge_count, mp_group, - async_op): + async_op, gelu_gemm_func, vector_matmul_func): if config.q_int8: - intermediate = inference_module.fused_gemm_gelu_int8(input, inter_w, inter_b, config.epsilon, q_scales[2], - (q_groups * (2**merge_count)), config.pre_layer_norm) - output = inference_module.vector_matmul_int8(intermediate, output_w, q_scales[3], q_groups, (merge_count)) + intermediate = gelu_gemm_func(input, inter_w, inter_b, config.epsilon, q_scales[2], + (q_groups * (2**merge_count)), config.pre_layer_norm) + output = vector_matmul_func(intermediate, output_w, q_scales[3], q_groups, (merge_count)) else: - mlp_gemm_func = inference_module.fused_gemm_gelu_fp16 if config.fp16 else \ - inference_module.fused_gemm_gelu_fp32 - - output = mlp_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op) + output = gelu_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op) if mp_group is not None and dist.get_world_size(group=mp_group) > 1: dist.all_reduce(output, group=mp_group, async_op=async_op) @@ -150,10 +147,13 @@ def __init__(self, config, q_scales=None, q_groups=1, merge_count=1, mlp_extra_g self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups self.merge_count = int(math.log2(merge_count)) self.mp_group = mp_group + self.gelu_gemm_func = GELUGemmOp(self.config) + self.vector_matmul_func = VectorMatMulOp(self.config) def forward(self, input, async_op=False): return DeepSpeedMLPFunction.apply(input, self.inter_w, self.inter_b, self.config, self.output_b, self.output_w, - self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op) + self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op, + self.gelu_gemm_func, self.vector_matmul_func) class DeepSpeedMoEInference(nn.Module): @@ -187,18 +187,7 @@ def __init__(self, self.config = config self.config.layer_id = DeepSpeedMoEInference.layer_id - global inference_module - global specialized_mode - if inference_module is None: - specialized_mode = False - # InferenceSpecializedBuilder is not among DeepSpeed provided builder yet, so we infer by builder name string - builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder") - if builder is not None and builder.is_compatible(): - inference_module = builder.load() - specialized_mode = True - else: - inference_module = InferenceBuilder().load() - self.config.specialized_mode = specialized_mode + assert self.config.dtype != torch.bfloat16, "DeepSpeed MoE Transformer Inference not yet tested for bfloat support" DeepSpeedMoEInference.layer_id += 1 @@ -213,10 +202,8 @@ def __init__(self, self.res_mlp = DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, 
mp_group) self.res_coef = nn.Parameter(torch.Tensor(self.config.hidden_size, 2)) - self.coef_func = inference_module.softmax_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.softmax_fp32 - self.vector_matmul_func = inference_module.vector_matmul_fp16 if self.config.dtype == torch.float16 else \ - inference_module.vector_matmul_fp32 + self.coef_func = SoftmaxOp(self.config) + self.vector_matmul_func = VectorMatMulOp(self.config) config.mp_size = 1 self.mlp = nn.ModuleList( @@ -226,7 +213,7 @@ def __init__(self, self.moe_gate = TopKGate(self.config.hidden_size, self.config.global_experts, self.config.k, self.config.capacity_factor, self.config.eval_capacity_factor, self.config.min_capacity, self.config.noisy_gate_policy, self.config.drop_tokens, - self.config.use_rts) + self.config.use_rts, self.ep_group) self.ep_group = ep_group self.mp_group = mp_group @@ -234,12 +221,10 @@ def __init__(self, print("DeepSpeed MoE Transformer Inference config is ", self.config.__dict__) - self.bias_residual_func = inference_module.bias_residual_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.bias_residual_fp32 - self.ds_layernorm = inference_module.layer_norm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.layer_norm_fp32 - self.einsum_sec_sm_ecm = inference_module.einsum_sec_sm_ecm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \ - inference_module.einsum_sec_sm_ecm_fp32 + self.bias_residual_func = BiasResidualOp(self.config) + self.ds_layernorm = LayerNormOp(self.config) + self.einsum_sec_sm_ecm = EinsumSecSmEcmOp(self.config) + self.moe_res_matmul = MoEResMatmulOp(self.config) def res_coef_func(self, inp, async_op): inp = self.vector_matmul_func(inp, self.res_coef, async_op) @@ -346,7 +331,7 @@ def forward(self, dim=0)[dist.get_rank(group=self.expert_mp_group)] if self.config.mlp_type == 'residual': - inference_module.moe_res_matmul(res_mlp_out, res_coef_out, output) + self.moe_res_matmul(res_mlp_out, res_coef_out, output) output = self.bias_residual_func(output, residual_add, torch.empty(1)) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_add.py b/deepspeed/ops/transformer/inference/op_binding/bias_add.py new file mode 100644 index 000000000000..d2ae38f546eb --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_add.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasAddOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasAddOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_add_func = self.inference_module.bias_add_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_add_func = self.inference_module.bias_add_bf16 + else: + self.bias_add_func = self.inference_module.bias_add_fp32 + except AttributeError: + self.bias_add_func = self.bias_add_fallback + + @classmethod + def bias_add_fallback(cls, input, bias): + return torch.add(input, bias) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_add_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py b/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py new file mode 100644 index 000000000000..f0fee0b0d06e --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_gelu.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasGeluOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasGeluOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_gelu_func = self.inference_module.bias_gelu_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_gelu_func = self.inference_module.bias_gelu_bf16 + else: + self.bias_gelu_func = self.inference_module.bias_gelu_fp32 + except AttributeError: + self.bias_gelu_func = self.bias_gelu_fallback + + @classmethod + def bias_gelu_fallback(cls, activations, bias): + # Expected behavior is that of casting to float32 internally and using the tanh approximation + return F.gelu(activations.to(torch.float32) + bias.to(torch.float32), approximate='tanh').to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_gelu_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_relu.py b/deepspeed/ops/transformer/inference/op_binding/bias_relu.py new file mode 100644 index 000000000000..ccfade1d9524 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_relu.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasReluOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasReluOp, self).__init__(config) + + try: + if self.config.dtype == torch.float16: + self.bias_relu_func = self.inference_module.bias_relu_fp16 + elif self.config.dtype == torch.bfloat16: + self.bias_relu_func = self.inference_module.bias_relu_bf16 + else: + self.bias_relu_func = self.inference_module.bias_relu_fp32 + except AttributeError: + self.bias_relu_func = self.bias_relu_fallback + + @classmethod + def bias_relu_fallback(cls, activations, bias): + # Expected behavior is that of casting to float32 internally + return F.relu(activations.to(torch.float32) + bias.to(torch.float32)).to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor): + return self.bias_relu_func(activation, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/bias_residual.py b/deepspeed/ops/transformer/inference/op_binding/bias_residual.py new file mode 100644 index 000000000000..ecad50e10ffe --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/bias_residual.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class BiasResidualOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BiasResidualOp, self).__init__(config) + + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.bias_residual_func = self.inference_module.bias_residual_fp16 + else: + self.bias_residual_func = self.inference_module.bias_residual_fp32 + except AttributeError: + self.bias_residual_func = self.bias_residual_fallback + + @classmethod + def bias_residual_fallback(cls, output, residual, bias): + raise NotImplementedError("bias residual fallback isn't implemented") + + def forward(self, output, residual, bias): + return self.bias_residual_func(output, residual, bias) diff --git a/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py b/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py new file mode 100644 index 000000000000..f34b10f786d1 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/einsum_sec_sm_ecm.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class EinsumSecSmEcmOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig): + super(EinsumSecSmEcmOp, self).__init__(config) + + try: + if self.config.dtype in [torch.float16, torch.int8]: + self.einsum_sec_sm_ecm_func = self.inference_module.einsum_sec_sm_ecm_fp16 + else: + self.einsum_sec_sm_ecm_func = self.inference_module.einsum_sec_sm_ecm_fp32 + except AttributeError: + self.einsum_sec_sm_ecm_func = self.einsum_sec_sm_ecm_fallback + + @classmethod + def einsum_sec_sm_ecm_fallback(cls, Q, W): + raise NotImplementedError("einsum sec sm ecm fallback isn't implemented") + + def forward(self, Q, W): + return self.einsum_sec_sm_ecm_func(Q, W) diff --git a/deepspeed/ops/transformer/inference/op_binding/gated_activation.py b/deepspeed/ops/transformer/inference/op_binding/gated_activation.py new file mode 100644 index 000000000000..d28d818ce4b3 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/gated_activation.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from deepspeed.utils.types import ActivationFuncType +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class GatedActivationOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(GatedActivationOp, self).__init__(config) + try: + self.gated_activation_func = self.inference_module.gated_activation + except AttributeError: + self.gated_activation_func = self.gated_activation_fallback + + @classmethod + def gated_activation_fallback(cls, activation, bias, activation_func_type): + # Expected behavior is that of casting to float32 internally + # Explicitly using the default GeLU + activation_func = None + activations = activation + bias.reshape(1, 1, -1) + hidden_states, gate = activations.chunk(2, dim=-1) + + if activation_func_type == ActivationFuncType.GATED_SILU: + activation_func = F.silu + elif activation_func_type == ActivationFuncType.GATED_GELU: + activation_func = F.gelu + + return hidden_states * activation_func(gate.to(torch.float32)).to(activations.dtype) + + def forward(self, activation: torch.Tensor, bias: torch.Tensor, activation_func_type: ActivationFuncType): + return self.gated_activation_func(activation, bias, activation_func_type) diff --git a/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py index 63323c150752..60bbb4b48bdb 100644 --- a/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py @@ -4,6 +4,7 @@ # DeepSpeed Team import torch +import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp import deepspeed @@ -14,7 +15,9 @@ class GELUGemmOp(BaseOp): def __init__(self, config: DeepSpeedInferenceConfig): super(GELUGemmOp, self).__init__(config) try: - if self.config.dtype in [torch.float16, torch.int8]: + if self.config.dtype == torch.int8: + self.fused_gemm_gelu = self.inference_module.fused_gemm_gelu_int8 + elif self.config.dtype == torch.float16: if deepspeed.HAS_TRITON and self.config.use_triton and self.config.dtype == torch.float16: from deepspeed.ops.transformer.inference.triton.ops import fused_gemm_gelu as 
_triton_fused_gemm_gelu self.fused_gemm_gelu = _triton_fused_gemm_gelu # type: ignore @@ -28,7 +31,11 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.fused_gemm_gelu = self.gelu_gemm_fallback def gelu_gemm_fallback(self, input, weight, scale, bias, out, out_scale, dtype, transpose): - raise NotImplementedError + tmp = torch.matmul(input, weight) + tmp = F.gelu(tmp.to(torch.float32) + bias.to(torch.float32), approximate="tanh").to(tmp.dtype) + output = torch.matmul(tmp, out) + + return output def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, weight_out: torch.Tensor): diff --git a/deepspeed/ops/transformer/inference/op_binding/layer_norm.py b/deepspeed/ops/transformer/inference/op_binding/layer_norm.py new file mode 100644 index 000000000000..31219a58ac3c --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/layer_norm.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn.functional as F +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class LayerNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + super(LayerNormOp, self).__init__(config) + try: + if config is None: + self.layer_norm_func = self.inference_module.layer_norm + elif self.config.dtype in [torch.float16, torch.int8]: + self.layer_norm_func = self.inference_module.layer_norm_fp16 + else: + self.layer_norm_func = self.inference_module.layer_norm_fp32 + except AttributeError: + self.layer_norm_func = self.layer_norm_fallback + + @classmethod + def layer_norm_residual(cls, vals, bias, res, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + bias_f = bias.to(torch.float32).reshape(1, 1, -1) + res_f = res.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + return F.layer_norm(vals_f + bias_f + res_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + + @classmethod + def layer_norm_residual_store_pre_ln_res(cls, vals, bias, res, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + bias_f = bias.to(torch.float32).reshape(1, 1, -1) + res_f = res.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + res_output = vals_f + bias_f + res_f + norm_output = F.layer_norm(res_output, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + return norm_output, res_output.to(dtype) + + @classmethod + def layer_norm_fallback(cls, vals, gamma, beta, epsilon): + channels = gamma.shape[0] + dtype = gamma.dtype + vals_f = vals.to(torch.float32) + gamma_f = gamma.to(torch.float32) + beta_f = beta.to(torch.float32) + return F.layer_norm(vals_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype) + + def forward(self, vals, gamma, beta, epsilon): + return self.layer_norm_func(vals, gamma, beta, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py index 3064c00d1755..97daf8b74bd8 100644 --- a/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py @@ -5,12 +5,12 @@ from typing import Optional -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp from deepspeed.utils.types import NormType +from 
.pre_rms_norm import PreRMSNormOp class MLPGemmOp(BaseOp): @@ -39,23 +39,46 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.mlp_gemm_func = self.mlp_gemm_fallback elif self.config.norm_type == NormType.RMSNorm: self.mlp_gemm_func = self.rms_mlp_gemm_fallback + self.pre_rms_norm = PreRMSNormOp() def mlp_gemm_fallback(self, input, residual, input_bias, weight_interm, weight_out, bias, gamma, beta, eps, pre_layer_norm, mlp_after_attn, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and mlp_after_attn and not transpose: - residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta, - self.config.epsilon) - tmp = torch.matmul(residual_add, weight_interm) + if mlp_after_attn: + residual_add = F.layer_norm(input + residual + input_bias, (input.shape[2], ), gamma, beta, eps) + tmp = torch.matmul(residual_add, weight_interm.t() if transpose else weight_interm) tmp = F.gelu(tmp + bias) - output = torch.matmul(tmp, weight_out) - return (output, residual_add) + output = torch.matmul(tmp, weight_out.t() if transpose else weight_out) + + return output, residual_add else: + # TODO: SW-151870 implement mlp_gemm_fallback raise NotImplementedError def rms_mlp_gemm_fallback(self, input, residual, weight_interm, weight_out, gamma, eps, interm_scale, out_scale, dtype, mlp_act_func_type, transpose): - raise NotImplementedError + inp_norm, residual = self.pre_rms_norm(input, residual, gamma, eps) + tmp = torch.matmul(inp_norm.view([-1, inp_norm.size(2)]), weight_interm.t() if transpose else weight_interm) + up_proj, gate_proj = tmp.chunk(2, dim=1) + + from deepspeed.utils.types import ActivationFuncType + if mlp_act_func_type == ActivationFuncType.GELU: + intermediate = F.gelu(gate_proj) + elif mlp_act_func_type == ActivationFuncType.ReLU: + intermediate = F.relu(gate_proj) + elif mlp_act_func_type == ActivationFuncType.GATED_GELU: + intermediate = F.gelu(gate_proj) + elif mlp_act_func_type == ActivationFuncType.GATED_SILU: + intermediate = F.silu(gate_proj) + else: + raise NotImplementedError(f"rms_mlp_gemm_fallback not implemented for activation type {mlp_act_func_type}") + + intermediate = intermediate * up_proj + + output = torch.matmul(intermediate, weight_out.t() if transpose else weight_out) + output = output.view([input.size(0), input.size(1), -1]) + + return [output, residual] def forward(self, input: torch.Tensor, diff --git a/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py b/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py new file mode 100644 index 000000000000..ef3558c8bc88 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/moe_res_matmul.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class MoEResMatmulOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(MoEResMatmulOp, self).__init__(config) + try: + self.moe_res_matmul_func = self.inference_module.moe_res_matmul + except AttributeError: + self.moe_res_matmul_func = self.moe_res_matmul_fallback + + @classmethod + def moe_res_matmul_fallback(cls, residual, coef, output): + coef_t = coef.transpose(1, 2).contiguous() + coef1, coef2 = torch.split(coef_t, split_size_or_sections=coef_t.shape[len(coef_t.shape) - 1] // 2, dim=-1) + return residual * coef1 + output * coef2 + + def forward(self, residual, coef, output): + return self.moe_res_matmul_func(residual, coef, output) diff --git a/deepspeed/ops/transformer/inference/op_binding/pad_transform.py b/deepspeed/ops/transformer/inference/op_binding/pad_transform.py new file mode 100644 index 000000000000..876fefc3bcfb --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/pad_transform.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class PadTransformOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(PadTransformOp, self).__init__(config) + try: + self.pad_transform_func = self.inference_module.pad_transform_fp16 + except AttributeError: + self.pad_transform_func = self.pad_transform_fallback + + @staticmethod + def pad_transform_fallback(query, key, value, heads, do_flash_attn): + raise NotImplementedError("pad_transform fallback is not implemented.") + + def forward(self, query, key, value, heads, do_flash_attn): + return self.pad_transform_func(query, key, value, heads, do_flash_attn) diff --git a/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py b/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py new file mode 100644 index 000000000000..7969d20f0527 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/pre_rms_norm.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp +from .rms_norm import RMSNormOp + + +class PreRMSNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(PreRMSNormOp, self).__init__(config) + try: + self.pre_rms_norm_func = self.inference_module.pre_rms_norm + except AttributeError: + self.pre_rms_norm_func = self.pre_rms_norm_fallback + + @staticmethod + def pre_rms_norm_fallback(vals, residual, gamma, epsilon): + residual = vals.to(torch.float32) + residual.to(torch.float32) + vals = residual + + return RMSNormOp.rms_norm_fallback(vals, gamma, epsilon), residual.to(gamma.dtype) + + def forward(self, vals, residual, gamma, epsilon): + return self.pre_rms_norm_func(vals, residual, gamma, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py index 250bf9864e1e..9ff5366fae5d 100644 --- a/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py +++ b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py @@ -3,11 +3,11 @@ # DeepSpeed Team -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from .rms_norm import RMSNormOp import deepspeed from deepspeed.utils.types import NormType @@ -56,19 +56,23 @@ def _triton_autotune(min_seqlen, max_seqlen, hidden_size, dtype=torch.float16): matmul(A, B) Fp16Matmul._update_autotune_table() - def qkv_gemm_fallback(self, input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose: - inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps) - tmp = torch.matmul(inp_norm, weight) - if add_bias: - tmp += bias - output = [tmp, inp_norm] - return output - else: - raise NotImplementedError + @staticmethod + def qkv_gemm_fallback(input, weight, q_scale, bias, gamma, beta, eps, add_bias, q_int8, transpose): + inp_norm = F.layer_norm(input, (input.shape[2], ), gamma, beta, eps) + tmp = torch.matmul(inp_norm, weight.t() if transpose else weight) + if add_bias: + tmp += bias + output = [tmp, inp_norm] + + return output + + @staticmethod + def rms_qkv_gemm_fallback(input, weight, q_scale, gamma, eps, q_int8, transpose): + inp_norm = RMSNormOp.rms_norm_fallback(input, gamma, eps) + tmp = torch.matmul(inp_norm, weight.t() if transpose else weight) + output = [tmp, inp_norm] - def rms_qkv_gemm_fallback(self, input, weight, q_scale, gamma, eps, q_int8, transpose): - raise NotImplementedError + return output def forward(self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, gamma: torch.Tensor, beta: torch.Tensor): diff --git a/deepspeed/ops/transformer/inference/op_binding/residual_add.py b/deepspeed/ops/transformer/inference/op_binding/residual_add.py index 6f9b35cbc05d..93b229c5d1ac 100644 --- a/deepspeed/ops/transformer/inference/op_binding/residual_add.py +++ b/deepspeed/ops/transformer/inference/op_binding/residual_add.py @@ -3,9 +3,10 @@ # DeepSpeed Team -import os import torch from typing import Optional + +from .vector_add import VectorAddOp from ..config import DeepSpeedInferenceConfig from .base import BaseOp @@ -22,11 +23,32 @@ def __init__(self, config: DeepSpeedInferenceConfig): else: self.residual_add_func = self.inference_module.residual_add_bias_fp32 except AttributeError: - 
self.residual_add_func = None - try: - self._vector_add = self.inference_module._vector_add - except AttributeError: - self._vector_add = None + self.residual_add_func = self.residual_add_fallback + self.vector_add = VectorAddOp() + + @staticmethod + def res_add_bias(hidden_state, residual, attn_output, attn_bias, final_bias, add_attn_bias, mp_size): + hidden_state += attn_output + (residual + final_bias) / mp_size + if add_attn_bias: + hidden_state += attn_bias / mp_size + + return hidden_state + + @staticmethod + def residual_add_fallback(hidden_state, residual, attention_output, attention_bias, final_bias, mp_size, + mlp_after_attn, add_bias, pre_layer_norm): + if mlp_after_attn: + if pre_layer_norm: + tmp = (residual.float() + attention_output.float() + attention_bias.float() + + final_bias.float()) / mp_size + hidden_state.float() + else: + tmp = residual.float() + hidden_state.float() + final_bias.float() + else: + tmp = ResidualAddOp.res_add_bias(hidden_state, residual, attention_output, attention_bias, final_bias, + add_bias, mp_size) + residual.copy_(tmp.to(hidden_state.dtype)) + + return residual def forward(self, hidden_state: torch.Tensor, @@ -37,28 +59,15 @@ def forward(self, attention_bias: Optional[torch.Tensor] = None, final_bias: Optional[torch.Tensor] = None): - if self.residual_add_func is not None: - if final_bias is None: - residual = self._vector_add(residual, hidden_state, 1.0 / self.config.mp_size) - else: - if not self.config.pre_layer_norm and residual_add is not None: - # only use residual add if its set and we are not pre layer norm - residual = residual_add - - self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias, - self.config.mp_size, self.config.mlp_after_attn, add_bias, - self.config.pre_layer_norm) + if final_bias is None and attention_bias is None: + residual = self.vector_add(residual + attention_output, hidden_state, 1.0 / self.config.mp_size) else: - # fallback - if os.environ.get('DS_KI_FALLBACK') == 'True' and self.config.mlp_after_attn: - if self.config.pre_layer_norm: - tmp = (residual.float() + attention_output.float() + attention_bias.float() + - final_bias.float()) / self.config.mp_size + hidden_state.float() - else: - tmp = residual.float() + hidden_state.float() + final_bias.float() + if not self.config.pre_layer_norm and residual_add is not None: + # only use residual add if its set and we are not pre layer norm + residual = residual_add + + self.residual_add_func(hidden_state, residual, attention_output, attention_bias, final_bias, + self.config.mp_size, self.config.mlp_after_attn, add_bias, + self.config.pre_layer_norm) - input_dtype = hidden_state.dtype - residual = tmp.to(input_dtype) - else: - raise NotImplementedError return residual diff --git a/deepspeed/ops/transformer/inference/op_binding/rms_norm.py b/deepspeed/ops/transformer/inference/op_binding/rms_norm.py new file mode 100644 index 000000000000..128883ce5d43 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/rms_norm.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class RMSNormOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(RMSNormOp, self).__init__(config) + try: + self.rms_norm_func = self.inference_module.rms_norm + except AttributeError: + self.rms_norm_func = self.rms_norm_fallback + + @staticmethod + def rms_norm_fallback(vals, gamma, epsilon): + variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True) + vals = vals * torch.rsqrt(variance + epsilon) + + if gamma.dtype in [torch.float16, torch.bfloat16]: + vals = vals.to(gamma.dtype) + + return gamma * vals + + def forward(self, vals, gamma, epsilon): + return self.rms_norm_func(vals, gamma, epsilon) diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax.py b/deepspeed/ops/transformer/inference/op_binding/softmax.py index bc309d94df14..b408883d5cfd 100644 --- a/deepspeed/ops/transformer/inference/op_binding/softmax.py +++ b/deepspeed/ops/transformer/inference/op_binding/softmax.py @@ -3,11 +3,11 @@ # DeepSpeed Team -import os import torch import torch.nn.functional as F from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from deepspeed.ops.transformer.inference.op_binding.workspace import InferenceContext class SoftmaxOp(BaseOp): @@ -25,24 +25,45 @@ def __init__(self, config: DeepSpeedInferenceConfig): except AttributeError: self.softmax_func = self.softmax_fallback - def softmax_fallback(self, attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, - async_op, layer_scale, head_offset, mp_size): - if os.environ.get('DS_KI_FALLBACK') == 'True': - alibi = alibi[head_offset:head_offset + self.num_attention_heads_per_partition] - input_dtype = attn_scores.dtype - if (triangular): - tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool) - attn_scores = torch.masked_fill(attn_scores * layer_scale, tri, torch.finfo(input_dtype).min) - if alibi is not None: - attn_scores += alibi - if attn_mask is not None: - # expand atten_mask from two dim into 4 dim, insert two dims in the middle + @staticmethod + def softmax_fallback(attn_scores, attn_mask, alibi, triangular, recompute, local_attention, window_size, async_op, + layer_scale, head_offset, mp_size): + scores_len = len(attn_scores.size()) + heads = 1 + if scores_len > 1: + heads = attn_scores.size()[1] + num_attention_heads_per_partition = heads // mp_size + + if alibi is not None: + if len(alibi.shape) == 1: + alibi = None + else: + alibi = alibi[head_offset:head_offset + num_attention_heads_per_partition] + if attn_mask is not None and len(attn_mask.shape) == 1: + attn_mask = None + input_dtype = attn_scores.dtype + attn_scores *= layer_scale + + if alibi is not None: + attn_scores += alibi + if attn_mask is not None: + # expand atten_mask from two dim into 4 dim, insert two dims in the middle + if len(attn_mask.shape) == 2: + # The above if statement was added because the mask was already 4D so this + # expansion should be avoided as it expands to 6D and crashes later (in bloom + # HE KI FB) attn_mask = attn_mask[:, None, None, :] - attn_scores += attn_mask - output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype) - return output - else: - raise NotImplementedError + attn_scores += attn_mask + if triangular: + if attn_scores.shape[2] == 1: # query using kv cache + token_idx = 
InferenceContext.Instance().current_tokens() + tri = torch.arange(attn_scores.shape[2], device=attn_scores.device).ge(token_idx) + else: + tri = ~torch.tril(torch.ones(attn_scores.size(), device=attn_scores.device)).to(bool) + attn_scores = torch.masked_fill(attn_scores, tri, float('-inf')) + output = F.softmax(attn_scores, dim=-1, dtype=torch.float32).to(input_dtype) + + return output def forward(self, attn_scores: torch.Tensor, attn_mask: torch.Tensor, alibi: torch.Tensor, triangular: bool, recompute: bool, local_attention: bool, window_size: int, async_op: bool, layer_scale: float, diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax_context.py b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py index 0dc4e08a3633..b9b521e4cd53 100644 --- a/deepspeed/ops/transformer/inference/op_binding/softmax_context.py +++ b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py @@ -7,6 +7,8 @@ from deepspeed import comm as dist from ..config import DeepSpeedInferenceConfig from .base import BaseOp +from .softmax import SoftmaxOp +from deepspeed.ops.transformer.inference.op_binding.workspace import InferenceContext class SoftmaxContextOp(BaseOp): @@ -23,13 +25,109 @@ def __init__(self, config: DeepSpeedInferenceConfig): except AttributeError: self.softmax_context_func = self.softmax_context_fallback + @staticmethod + def transform4d_0213(x, seq_length): + assert x.dim() == 3, F"{x.dim()=} is not supported" + batch_size, num_heads, seq_length_head_dim = x.shape + head_dim = seq_length_head_dim // seq_length + x = x.view(batch_size, num_heads, seq_length, head_dim) + x = x.permute(0, 2, 1, 3) + + return x + + @staticmethod + def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep <= 1 or num_key_value_heads == 1: + return hidden_states + + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + @staticmethod + def bias_add_transform_0213(input, bias, num_heads, trans_count, perform_bias=False): + assert trans_count == 1 or trans_count == 3, F"{trans_count=} is not supported" + assert input.dim() == 3, F"{input.dim()=} is not supported" + input_biased = torch.add(input, bias) if perform_bias else input + batch_size, seq_length, value_size = input_biased.shape + hid_dim = value_size // trans_count + head_dim = hid_dim // num_heads + + if trans_count == 1: + query_layer = input.view(batch_size, seq_length, num_heads, head_dim) + query_layer = query_layer.permute(0, 2, 1, 3) + key_layer = torch.zeros_like(query_layer) + value_layer = torch.zeros_like(query_layer) + return query_layer, key_layer, value_layer + + qkv_layers = input.view(batch_size, seq_length, 3, num_heads, head_dim) + query_layer, key_layer, value_layer = qkv_layers[..., 0, :, :], qkv_layers[..., 1, :, :], qkv_layers[..., + 2, :, :] + query_layer = query_layer.transpose(1, 2) + key_layer = key_layer.transpose(1, 2) + value_layer = value_layer.transpose(1, 2) + + return query_layer, key_layer, value_layer + def softmax_context_fallback(self, query_key_value, attn_mask, rotary_dim, rotate_half, rotate_every_two, heads, num_kv, norm_factor, triangular_masking, local_attention, window_size, no_masking, - layer_id, num_layers, alibi, rope_theta): - raise NotImplementedError + layer_id, num_layers, alibi, rope_theta, is_prompt, token_idx, position_ids): + bat_0213_query, 
bat_0213_key, bat_0213_value = self.bias_add_transform_0213( + query_key_value, None, heads, 3, False) + + if rotary_dim > 0 and rotate_half: + from transformers.models.llama.modeling_llama import apply_rotary_pos_emb + + rotary = InferenceContext.Instance().get_rotary(rotary_dim, rope_theta, bat_0213_value.device) + cos, sin = rotary(bat_0213_value, InferenceContext.Instance().get_max_tokens_num()) + # TODO: SW-170999 Optimize RoPE implementation. + bat_0213_query, bat_0213_key = apply_rotary_pos_emb(bat_0213_query, bat_0213_key, cos, sin, position_ids) + + bat_0213_key, bat_0213_value = InferenceContext.Instance().update_cache(layer_id, token_idx, is_prompt, + bat_0213_key, bat_0213_value) + + bat_0213_key = self.repeat_kv(bat_0213_key, num_kv) + bat_0213_value = self.repeat_kv(bat_0213_value, num_kv) + + bsz = query_key_value.shape[0] + head_dim = query_key_value.shape[2] // (heads * 3) + + bmm_output = torch.bmm(bat_0213_query.reshape(bsz * heads, bat_0213_query.shape[2], head_dim), + bat_0213_key.reshape(bsz * heads, bat_0213_key.shape[2], head_dim).transpose(1, 2)) + + layer_scale = 1.0 + if alibi is not None and len(alibi.shape) > 1: + layer_scale = max(1, layer_id).to(float) + + alpha = norm_factor * norm_factor / layer_scale + bmm_output *= alpha + bmm_output_reshape = bmm_output.reshape(bsz, heads, bmm_output.shape[1], bmm_output.shape[2]) + + recompute = is_prompt + if attn_mask is not None and len(attn_mask.shape) > 1 and attn_mask.shape[-1] < bmm_output_reshape.shape[3]: + attn_mask = torch.nn.functional.pad(attn_mask, (0, bmm_output_reshape.shape[3] - attn_mask.shape[-1]), + value=torch.finfo(attn_mask.dtype).min) + softmax_output = SoftmaxOp.softmax_fallback(bmm_output_reshape, attn_mask, alibi, triangular_masking, + recompute, local_attention, window_size, None, layer_scale, 0, 1) + + output = torch.bmm(softmax_output.reshape(bsz * heads, softmax_output.shape[2], softmax_output.shape[3]), + bat_0213_value.reshape(bsz * heads, bat_0213_value.shape[2], head_dim)) + + output = output.reshape(bsz, heads, output.shape[1], head_dim) + output = output.reshape(bsz, heads, output.shape[2] * head_dim) + input_seq_len = query_key_value.shape[1] + t4d_0123_output = self.transform4d_0213(output, input_seq_len) + t4d_0123_output = t4d_0123_output.reshape(bsz, t4d_0123_output.shape[1], heads * head_dim) + + if layer_id == num_layers - 1: + InferenceContext.Instance().advance_tokens() + + return t4d_0123_output, bat_0213_key, bat_0213_value def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: int, num_kv: int, - norm_factor: float, no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor): + norm_factor: float, no_masking: bool, layer_id: int, num_layers: int, alibi: torch.Tensor, + is_prompt: bool, token_idx: torch.Tensor, position_ids: torch.Tensor): if alibi is not None: batch_heads = query_key_value.shape[0] * heads @@ -42,6 +140,6 @@ def forward(self, query_key_value: torch.Tensor, attn_mask: torch.Tensor, heads: self.config.rotate_every_two, heads, num_kv, norm_factor, self.config.triangular_masking, self.config.local_attention, self.config.window_size, no_masking, layer_id, num_layers, alibi, - self.config.rope_theta) + self.config.rope_theta, is_prompt, token_idx, position_ids) return output diff --git a/deepspeed/ops/transformer/inference/op_binding/vector_add.py b/deepspeed/ops/transformer/inference/op_binding/vector_add.py new file mode 100644 index 000000000000..015340a1084b --- /dev/null +++ 
b/deepspeed/ops/transformer/inference/op_binding/vector_add.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class VectorAddOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(VectorAddOp, self).__init__(config) + try: + self.vector_add_func = self.inference_module._vector_add + except AttributeError: + self.vector_add_func = self.vector_add_fallback + + @classmethod + def vector_add_fallback(cls, a, b, gamma): + """Based on csrc/transformer/inference/csrc/pt_binding.cpp code of _vector_add""" + dtype = a.dtype + return (gamma * a.float() + b.float()).to(dtype) + + def forward(self, a, b, gamma): + return self.vector_add_func(a, b, gamma) diff --git a/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py index 011be859634d..cabab8d8c4ab 100644 --- a/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py +++ b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py @@ -3,7 +3,6 @@ # DeepSpeed Team -import os import torch from ..config import DeepSpeedInferenceConfig from .base import BaseOp @@ -25,7 +24,7 @@ def __init__(self, config: DeepSpeedInferenceConfig): else: self.vector_matmul_func = self.inference_module.vector_matmul_fp16 elif self.config.dtype == torch.int8: - self.vector_matmul_func = self.inference_module.vector_matmul_fp16 + self.vector_matmul_func = self.inference_module.vector_matmul_int8 elif self.config.dtype == torch.bfloat16: self.vector_matmul_func = self.inference_module.vector_matmul_bf16 else: @@ -34,10 +33,7 @@ def __init__(self, config: DeepSpeedInferenceConfig): self.vector_matmul_func = self.vector_matmul_fallback def vector_matmul_fallback(self, input, weight, async_op, q_scale, q_int8, transpose): - if os.environ.get('DS_KI_FALLBACK') == 'True' and not transpose: - return torch.matmul(input, weight) - else: - raise NotImplementedError + return torch.matmul(input, weight.t() if transpose else weight) def forward(self, input: torch.Tensor, weight: torch.Tensor, async_op: bool = False): q_scale = weight.scale if hasattr(weight, 'scale') else torch.empty(1) diff --git a/deepspeed/ops/transformer/inference/op_binding/workspace.py b/deepspeed/ops/transformer/inference/op_binding/workspace.py new file mode 100644 index 000000000000..b06b1ca0bd32 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/workspace.py @@ -0,0 +1,204 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + +minus_inf = -10000.0 +key_idx = 0 +value_idx = 1 + + +class InferenceContext: + + __instance = None + + def __init__(self): + self.kv_cache = None + self.kv_cache_elem_dtype = None + self.num_tokens = 1 + self.kv_cache_num_layers = None + self.kv_cache_size = None + self.max_out_tokens = None + self.rotary = None + self.allocate_called = False + self.static_shapes = True + + @classmethod + def Instance(cls): + if InferenceContext.__instance is None: + InferenceContext.__instance = InferenceContext() + return InferenceContext.__instance + + def gen_workspace(self, num_layers, num_heads, batch_size, prompt_len, hidden_dim, mp_size, external_cache, + elem_dtype, rank, max_out_tokens, min_out_tokens): + self.allocate_called = True + self.kv_cache = None + if not external_cache: + self.kv_cache_num_layers = num_layers + self.max_out_tokens = max_out_tokens + head_size = hidden_dim // num_heads + self.kv_cache_size = torch.Size([batch_size, (num_heads // mp_size), max_out_tokens, head_size]) + self.kv_cache_elem_dtype = elem_dtype + self.num_tokens = 0 + self.static_shapes = True + return True + + def retake_workspace(self): + return True + + def _retake_workspace(self): + assert self.allocate_called, "retake workspace called before allocate workspace" + + import deepspeed.accelerator as accelerator + if self.kv_cache is None: + self.kv_cache = [] + for layer in range(self.kv_cache_num_layers): + self.kv_cache.append((torch.zeros(self.kv_cache_size, + dtype=self.kv_cache_elem_dtype, + device=accelerator.get_accelerator().device_name()), + torch.zeros(self.kv_cache_size, + dtype=self.kv_cache_elem_dtype, + device=accelerator.get_accelerator().device_name()))) + + return True + + def update_cache(self, layer_id, token_idx, is_prompt, bat_0213_key, bat_0213_value): + has_workspace = self._retake_workspace() + assert has_workspace, "Could not allocate workspace" + + # Update current token + if is_prompt: + self.static_shapes = True + if token_idx is None: + self.static_shapes = False + InferenceContext.Instance().reset_tokens(bat_0213_key.shape[2]) + else: + InferenceContext.Instance().reset_tokens(token_idx) + + if token_idx is None: + token_idx = InferenceContext.Instance().current_tokens() + + bsz = bat_0213_key.shape[0] + + # Update cache content + if is_prompt: + cache_max_seq = self.kv_cache_size[2] + cache_max_head_dim = self.kv_cache_size[3] + seq = bat_0213_key.shape[2] + + mask = torch.arange(cache_max_seq, device=bat_0213_key.device) + mask = mask.ge(token_idx) + mask = mask.unsqueeze(-1) + mask = mask.expand([cache_max_seq, cache_max_head_dim]) + + self.kv_cache[layer_id][key_idx][:bsz, :, :seq, :].copy_(bat_0213_key) + self.kv_cache[layer_id][key_idx][:bsz, :].masked_fill_(mask, 0) + self.kv_cache[layer_id][value_idx][:bsz, :, :seq, :].copy_(bat_0213_value) + self.kv_cache[layer_id][value_idx][:bsz, :].masked_fill_(mask, 0) + else: + if self.static_shapes: + assert type(token_idx) == torch.Tensor, "token_idx is expected to be torch.Tensor" + self.kv_cache[layer_id][key_idx][:bsz].index_copy_(2, token_idx - 1, bat_0213_key) + self.kv_cache[layer_id][value_idx][:bsz].index_copy_(2, token_idx - 1, bat_0213_value) + else: + assert type(token_idx) == int, "token_idx is expected to be int" + self.kv_cache[layer_id][key_idx][:bsz, :, token_idx - 1:token_idx, :] = bat_0213_key + self.kv_cache[layer_id][value_idx][:bsz, :, token_idx - 1:token_idx, 
:] = bat_0213_value + + bat_0213_key = self.kv_cache[layer_id][key_idx][:bsz] + bat_0213_value = self.kv_cache[layer_id][value_idx][:bsz] + + if not self.static_shapes: + bat_0213_key = bat_0213_key[:, :, :token_idx, :] + bat_0213_value = bat_0213_value[:, :, :token_idx, :] + + return bat_0213_key, bat_0213_value + + def release_workspace(self): + self.kv_cache = None + self.rotary = None + + def reset_tokens(self, initial_tokens=1): + self.num_tokens = initial_tokens + + def current_tokens(self): + return self.num_tokens + + def advance_tokens(self): + self.num_tokens = self.num_tokens + 1 + + def get_kv_cache(self): + return self.kv_cache + + def get_rotary(self, rotary_dim, rope_theta, device=None): + if self.rotary is None: + from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding + + self.rotary = LlamaRotaryEmbedding(rotary_dim, base=rope_theta, device=device) + + return self.rotary + + def get_max_tokens_num(self): + return self.max_out_tokens + + +class WorkspaceOp(BaseOp): + + def __init__(self, config: DeepSpeedInferenceConfig = None): + if config is None: + config = DeepSpeedInferenceConfig() + super(WorkspaceOp, self).__init__(config) + + self.inference_context = InferenceContext.Instance() + try: + if config.dtype == torch.float32: + self.allocate_workspace = self.inference_module.allocate_workspace_fp32 + elif config.dtype == torch.bfloat16: + self.allocate_workspace = self.inference_module.allocate_workspace_bf16 + else: + self.allocate_workspace = self.inference_module.allocate_workspace_fp16 + self.release_workspace = self.inference_module.release_workspace + self.retake_workspace = self.inference_module.retake_workspace + self.reset_cache = self.inference_module.reset_cache + except AttributeError: + if config.dtype == torch.float32: + self.allocate_workspace = self.allocate_workspace_fp32_fallback + elif config.dtype == torch.bfloat16: + self.allocate_workspace = self.allocate_workspace_bf16_fallback + else: + self.allocate_workspace = self.allocate_workspace_fp16_fallback + self.release_workspace = self.release_workspace_fallback + self.retake_workspace = self.retake_workspace_fallback + self.reset_cache = self.reset_cache_fallback + + def allocate_workspace_fp32_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.float, rank, max_out_tokens, + min_out_tokens) + + def allocate_workspace_bf16_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.bfloat16, rank, max_out_tokens, + min_out_tokens) + + def allocate_workspace_fp16_fallback(self, hidden_dim, num_heads, prompt_length, batch_size, num_layers, mp_size, + external_cache, rank, max_out_tokens, min_out_tokens): + return self.inference_context.gen_workspace(num_layers, num_heads, batch_size, prompt_length, hidden_dim, + mp_size, external_cache, torch.half, rank, max_out_tokens, + min_out_tokens) + + def reset_cache_fallback(self): + return self.inference_context.reset_tokens() + + def release_workspace_fallback(self): + return self.inference_context.release_workspace() + + def retake_workspace_fallback(self): + return 
self.inference_context.retake_workspace() diff --git a/deepspeed/ops/transformer/inference/triton/attention.py b/deepspeed/ops/transformer/inference/triton/attention.py index c05370ec74e5..6845d91b06be 100644 --- a/deepspeed/ops/transformer/inference/triton/attention.py +++ b/deepspeed/ops/transformer/inference/triton/attention.py @@ -125,7 +125,7 @@ def _triton_autotune(min_seqlen, context_4d_matmul(output, qkv, head_size) Fp16Matmul._update_autotune_table() - def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi): + def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi, is_prompt, token_idx, position_ids): if isinstance(qkv_out, list): qkv_out = qkv_out[0] @@ -143,7 +143,10 @@ def ds_compute_attention(self, qkv_out, input_mask, layer_past, alibi): no_masking=no_masking, layer_id=self.config.layer_id, num_layers=TritonSelfAttention.num_layers, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) context_layer, key_layer, value_layer = attn_key_value return context_layer, key_layer, value_layer @@ -161,7 +164,8 @@ def forward( norm_w=None, norm_b=None, alibi=None, - use_triton_attention=True): + use_triton_attention=True, + **kwargs): if not self.config.pre_layer_norm: qkv_out = self.linear_func(input=input, @@ -192,10 +196,16 @@ def forward( triangular=self.triangular_masking) key_layer, value_layer = qkv[:, :, self.hidden_size:2 * self.hidden_size], qkv[:, :, 2 * self.hidden_size:] else: + is_prompt = kwargs.get("first_token", qkv_out[0].shape[1] > 1) + token_idx = kwargs.get("token_idx", None) + position_ids = kwargs.get("position_ids", None) context_layer, key_layer, value_layer = self.ds_compute_attention(qkv_out=qkv_out, input_mask=input_mask, layer_past=layer_past, - alibi=alibi) + alibi=alibi, + is_prompt=is_prompt, + token_idx=token_idx, + position_ids=position_ids) output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow) inp_norm = qkv_out[-1] diff --git a/deepspeed/ops/transformer/inference/triton/ops.py b/deepspeed/ops/transformer/inference/triton/ops.py index dd87d08d4d2c..dbed45313780 100644 --- a/deepspeed/ops/transformer/inference/triton/ops.py +++ b/deepspeed/ops/transformer/inference/triton/ops.py @@ -3,12 +3,10 @@ # DeepSpeed Team -import deepspeed -from deepspeed.ops.op_builder import InferenceBuilder import deepspeed.ops.transformer.inference.triton.matmul_ext as matmul_ext +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from deepspeed.ops.transformer.inference.triton.layer_norm import layer_norm, layer_norm_residual - -inference_module = None +from deepspeed.utils.types import ActivationFuncType def vector_matmul_func(input, weight, async_op, q_scale, q_int8, transposed_mode): @@ -76,15 +74,12 @@ def mlp_gemm_func(input, if use_triton_ln: mlp_input = layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon) else: - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - mlp_input = inference_module._layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon) + mlp_input = LayerNormOp.layer_norm_residual(input, input_bias, residual, gamma, beta, epsilon) # activation - if deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) == deepspeed.utils.types.ActivationFuncType.GELU: + if ActivationFuncType(mlp_act_func_type) == ActivationFuncType.GELU: activation = "gelu" - elif deepspeed.utils.types.ActivationFuncType(mlp_act_func_type) ==
deepspeed.utils.types.ActivationFuncType.ReLU: + elif ActivationFuncType(mlp_act_func_type) == ActivationFuncType.ReLU: activation = "relu" else: activation = "" @@ -121,10 +116,7 @@ def qkv_gemm_func( if use_triton_ln: qkv_input = layer_norm(input, gamma, beta, epsilon) else: - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - qkv_input = inference_module.layer_norm(input, gamma, beta, epsilon) + qkv_input = LayerNormOp()(input, gamma, beta, epsilon) qkv_out = matmul_ext.matmul(qkv_input, weight, bias=(bias if add_bias else None), activation="", use_triton=True) diff --git a/deepspeed/runtime/__init__.py b/deepspeed/runtime/__init__.py index 347ff7993d82..208299fb8c50 100644 --- a/deepspeed/runtime/__init__.py +++ b/deepspeed/runtime/__init__.py @@ -2,11 +2,3 @@ # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team - - -class DeepSpeedOptimizer(object): - pass - - -class ZeROOptimizer(DeepSpeedOptimizer): - pass diff --git a/deepspeed/runtime/activation_checkpointing/checkpointing.py b/deepspeed/runtime/activation_checkpointing/checkpointing.py index 02e0b197e927..d98ff71d7ab9 100644 --- a/deepspeed/runtime/activation_checkpointing/checkpointing.py +++ b/deepspeed/runtime/activation_checkpointing/checkpointing.py @@ -25,9 +25,12 @@ from deepspeed.runtime.config import DeepSpeedConfig from deepspeed.utils import logger -from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage, bwc_tensor_model_parallel_rank +from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers, FORWARD_GLOBAL_TIMER +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank from deepspeed.accelerator import get_accelerator +from deepspeed.runtime import compiler +import os # DeepSpeed Checkpointing Enabled or Disabled deepspeed_checkpointing_enabled = False @@ -605,6 +608,9 @@ def backward(ctx, *grads): # removing pointers to the contiguous buffer memory # so that they can be garbage collected once the checkpoints # have been used + if grads[0].device.type == 'hpu': + import habana_frameworks.torch as htorch + htorch.core.mark_step() if SYNCHRONIZE: get_accelerator().synchronize() if PROFILE_TIME: @@ -986,6 +992,23 @@ def after_backward_hook(_nonuse_grads): return tuple(all_outputs) +#TODO[SW-187093]: remove once WA in original pytorch code is resolved +disable_compile_bypass = os.environ.get('HL_DS_DISABLE_CHECKPOINTING_COMPILE_BYPASS', None) + + +def conditional_compile_disable(decorator_func): + + def conditional_decorator(function): + global disable_compile_bypass + if disable_compile_bypass: + return function # don't use compiler.disable decorator + return decorator_func(function) # use compiler.disable decorator + + return conditional_decorator + + +#TODO[SW-187093]: remove once WA in original pytorch code is resolved +@conditional_compile_disable(compiler.disable) def checkpoint(function, *args): """Checkpoint a model or part of the model. This has been directly copied from torch.utils.checkpoint. """ diff --git a/deepspeed/runtime/base_optimizer.py b/deepspeed/runtime/base_optimizer.py new file mode 100644 index 000000000000..6cfd66f1cc38 --- /dev/null +++ b/deepspeed/runtime/base_optimizer.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import torch + +from deepspeed.utils import logger +from deepspeed.utils.tensor_fragment import map_to_flat_opt_states +from deepspeed.runtime.utils import bwc_tensor_model_parallel_rank + + +class DeepSpeedOptimizer(object): + pass + + +class ZeROOptimizer(DeepSpeedOptimizer): + + def load_hp_checkpoint_state_from_checkpoint_dir(self, lp_groups_name: str, checkpoint_dir: str) -> None: + checkpoint_dir = os.path.join(checkpoint_dir, "zero") + optim_state_path = os.path.join(checkpoint_dir, "optimizer_state.pt") + assert os.path.isfile( + optim_state_path), f'{optim_state_path} containing optimizer global state is missing! Cannot proceed.' + optim_sd = torch.load(optim_state_path) + + self._load_global_state(optim_sd) + + tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + if self.mpu is None: + logger.warn("MPU is not provided, setting tp size to 1 in checkpoint loading.") + tp_world_size = 1 + else: + tp_world_size = self.mpu.get_slice_parallel_world_size() if hasattr(self.mpu, "get_slice_parallel_world_size") \ + else self.mpu.get_tensor_model_parallel_world_size() + + for i, (param_group, + loaded_param_group) in enumerate(zip(self.optimizer.param_groups, optim_sd['param_groups'])): + # We have an assumption that all params in the same param_group have the same keys + opt_keys = set() + steps = [] + + lp_groups = getattr(self, lp_groups_name) + for lp in lp_groups[i]: + if lp._hp_mapping is not None: + #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") + step = lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, + tp_world_size) + for key in lp._hp_mapping.get_optim_state_keys(): + opt_keys.add(key) + steps.append(step) + + hp_param = param_group['params'][0] + assert all(step == steps[0] for step in steps), f"Steps {steps} are not equal" + if steps[0] is not None: + self.optimizer.state[hp_param]['step'] = steps[0] + + map_to_flat_opt_states(hp_param, lp_groups[i], self.optimizer.state, opt_keys) + + for key, value in loaded_param_group.items(): + if key == 'params': + continue + param_group[key] = value diff --git a/deepspeed/runtime/bf16_optimizer.py b/deepspeed/runtime/bf16_optimizer.py index aaa836bf1c31..2cf357dd834b 100644 --- a/deepspeed/runtime/bf16_optimizer.py +++ b/deepspeed/runtime/bf16_optimizer.py @@ -6,19 +6,19 @@ from collections import OrderedDict import torch import sys -import os from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from deepspeed import comm as dist from deepspeed.runtime.constants import PIPE_REPLICATED -from deepspeed.runtime import ZeROOptimizer +from deepspeed.runtime.base_optimizer import ZeROOptimizer from packaging import version as pkg_version - from deepspeed.git_version_info import version from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim, - align_dense_tensors, all_gather_dp_groups, bwc_tensor_model_parallel_rank, - is_model_parallel_parameter, see_memory_usage, graph_process) + align_dense_tensors, all_gather_dp_groups, is_model_parallel_parameter, + see_memory_usage, graph_process, get_norm_with_moe_layers) +from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state, fragment_address, groups +from deepspeed.moe.utils import is_moe_param, is_moe_param_group +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank -from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state, 
fragment_address from deepspeed.checkpoint import enable_universal_checkpoint from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE, SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS, @@ -40,7 +40,8 @@ def __init__(self, timers=None, grad_acc_dtype=None, graph_harvesting=False, - immediate_grad_update=False): + immediate_grad_update=False, + has_moe_layers=False): super().__init__() see_memory_usage('begin bf16_optimizer', force=True) self.timers = timers @@ -59,7 +60,11 @@ def __init__(self, self.allgather_bucket_size = int(allgather_bucket_size) self.dp_process_group = dp_process_group self.dp_rank = dist.get_rank(group=self.dp_process_group) + self.has_moe_layers = has_moe_layers + self.non_expert_gradients = [] self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))] + if self.has_moe_layers: + self._configure_moe_settings() # Use torch (un)flatten ops self.flatten = _flatten_dense_tensors @@ -90,11 +95,26 @@ def __init__(self, see_memory_usage('end bf16_optimizer', force=True) + def _configure_moe_settings(self): + assert any( + [is_moe_param_group(group) for group in self.optimizer.param_groups] + ), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer" + + for i, group in enumerate(self.optimizer.param_groups): + if is_moe_param_group(group): + assert all([is_moe_param(param) + for param in group['params']]), "All params in MoE group must be MoE params" + self.real_dp_process_group[i] = groups._get_expert_data_parallel_group(group['name']) + self.expert_gradients = {} + if self.has_moe_layers: + for key in groups._get_expert_data_parallel_group_dict().keys(): + self.expert_gradients[key] = [] + def _setup_for_real_optimizer(self): - dp_world_size = dist.get_world_size(group=self.dp_process_group) - self.partition_count = [dp_world_size for i in range(len(self.optimizer.param_groups))] + self.partition_count = [dist.get_world_size(group=pg) for pg in self.real_dp_process_group] for i, param_group in enumerate(self.optimizer.param_groups): + real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i]) see_memory_usage(f'before initializing group {i}', force=True) partition_id = dist.get_rank(group=self.real_dp_process_group[i]) @@ -106,17 +126,16 @@ def _setup_for_real_optimizer(self): # create flat bf16 params self.bf16_groups_flat.append( self._flatten_dense_tensors_aligned(self.bf16_groups[i], - self.nccl_start_alignment_factor * dp_world_size)) - + self.nccl_start_alignment_factor * real_dp_world_size)) # Make bf16 params point to flat tensor storage self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i], flat_tensor=self.bf16_groups_flat[i]) # divide flat weights into equal sized partitions - partition_size = self.bf16_groups_flat[i].numel() // dp_world_size + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size bf16_dp_partitions = [ self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size) - for dp_index in range(dp_world_size) + for dp_index in range(real_dp_world_size) ] self.bf16_partitioned_groups.append(bf16_dp_partitions) @@ -127,8 +146,12 @@ def _setup_for_real_optimizer(self): num_elem_list = [t.numel() for t in self.bf16_groups[i]] # create fp32 gradients - self.fp32_groups_gradients_flat.append( - torch.zeros_like(self.bf16_groups_flat[i], dtype=self.grad_acc_dtype)) + fp32_flat_buffer = 
torch.zeros_like(self.bf16_groups_flat[i], dtype=self.grad_acc_dtype) + self.fp32_groups_gradients_flat.append(fp32_flat_buffer) + if self.has_moe_layers and is_moe_param_group(param_group): + self.expert_gradients[param_group['name']].append(fp32_flat_buffer) + else: + self.non_expert_gradients.append(fp32_flat_buffer) # track individual fp32 gradients for entire model fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i], @@ -191,11 +214,12 @@ def _create_param_mapping(self): return param_mapping def _link_all_hp_params(self): - dp_world_size = dist.get_world_size(group=self.dp_process_group) for i, _ in enumerate(self.optimizer.param_groups): + real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i]) + # Link bf16 and fp32 params in partition partition_id = dist.get_rank(group=self.real_dp_process_group[i]) - partition_size = self.bf16_groups_flat[i].numel() // dp_world_size + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size flat_hp_partition = self.fp32_groups_flat_partition[i] link_hp_params(lp_param_list=self.bf16_groups[i], flat_hp_partition=flat_hp_partition, @@ -257,10 +281,18 @@ def step(self, closure=None): if closure is not None: raise NotImplementedError(f'{self.__class__} does not support closure.') - all_groups_norm = get_global_norm_of_tensors(input_tensors=self.get_grads_for_norm(), - mpu=self.mpu, - norm_type=self.norm_type, - use_graph=self.graph_harvesting) + non_expert_grads_for_norm, expert_grads_for_norm = self.get_grads_for_norm() + non_expert_groups_norm = get_global_norm_of_tensors(input_tensors=non_expert_grads_for_norm, + mpu=self.mpu, + norm_type=self.norm_type, + use_graph=self.graph_harvesting) + all_groups_norm = non_expert_groups_norm + if self.has_moe_layers: + all_groups_norm = get_norm_with_moe_layers(non_expert_groups_norm, + mpu=self.mpu, + expert_tensors=expert_grads_for_norm, + norm_type=self.norm_type) + self._global_grad_norm = all_groups_norm assert all_groups_norm > 0. @@ -271,8 +303,18 @@ def step(self, closure=None): mpu=self.mpu, use_graph=self.graph_harvesting) + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + # In case of grad acc dtype different than FP32, need to cast to high precision. 
+ param_partition.grad = grad_partition.to( + param_partition.dtype) if grad_partition.dtype != param_partition.dtype else grad_partition + self.optimizer.step() + if self.grad_acc_dtype is not torch.float32: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + # We need to link optimizer state after the first step() call self._lazy_init_hp_params_optimizer_state() @@ -292,7 +334,7 @@ def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwarg self.clear_lp_grads() loss.backward(**bwd_kwargs) - if update_hp_grads: + if not self.immediate_grad_update and update_hp_grads: self.update_hp_grads(clear_lp_grads=clear_lp_grads) @torch.no_grad() @@ -310,7 +352,7 @@ def _update_hp_grad(self, lp, group_idx, param_idx, clear_lp_grads): # clear gradients if clear_lp_grads: - lp.grad._zero() + lp.grad.zero_() @torch.no_grad() def _update_hp_grads_func(self, clear_lp_grads=False): @@ -336,27 +378,55 @@ def update_hp_grads(self, clear_lp_grads=False): @torch.no_grad() def get_grads_for_reduction(self): - return self.fp32_groups_gradients_flat + if self.has_moe_layers: + return self.non_expert_gradients, self.expert_gradients + return self.non_expert_gradients, {} @torch.no_grad() def get_grads_for_norm(self, for_clipping=False): - grads = [] + """ + Returns: + tuple[list[Tensor], dict[ep_name, List[Tensor]] | list: + If for_clipping, return all gradients. + Otherwise, separate and return dict of expert_grad and list of non_expert_grad + """ + # (grads, expert_group_name) + expert_grads_for_norm = {} + + # grads + non_expert_grads_for_norm = [] + all_grads_for_clip = [] + tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + assert len(self.bf16_groups) == len(self.optimizer.param_groups) for i, group in enumerate(self.bf16_groups): for j, lp in enumerate(group): if not for_clipping: if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated: continue - if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp)): + # skip duplicated parameters. perform norm only on cards with tp_rank=0. + # non-duplicated parameters include: + # - Parameters with tp: Use allreducesum of mp_group. + # - Moe Parameters with ep: Use allreducesum of ep_group. 
+ if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp) or is_moe_param(lp)): continue if not self.fp32_groups_has_gradients[i][j]: continue - - grads.append(self.fp32_groups_gradients[i][j]) - - return grads + if not for_clipping: + param_group = self.optimizer.param_groups[i] + if self.has_moe_layers and is_moe_param_group(param_group): + if param_group['name'] not in expert_grads_for_norm: + expert_grads_for_norm[param_group['name']] = [] + expert_grads_for_norm[param_group['name']].append(self.fp32_groups_gradients[i][j]) + else: + non_expert_grads_for_norm.append(self.fp32_groups_gradients[i][j]) + else: + all_grads_for_clip.append(self.fp32_groups_gradients[i][j]) + if not for_clipping: + return non_expert_grads_for_norm, expert_grads_for_norm + return all_grads_for_clip @torch.no_grad() def update_lp_params(self): @@ -382,11 +452,20 @@ def clear_hp_grads(self): self.fp32_groups_has_gradients[i] = [False] * len(group) def clear_lp_grads(self): + + # using zero_() fixed memory address for graph replay + set_to_none = False if self.graph_harvesting else True + zero_grads_list = [] for group in self.bf16_groups: for param in group: - if param.grad is not None: - # Using zero_() fixed memory address for graph replay - param.grad.zero_() + if set_to_none: + param.grad = None + elif param.grad is not None: + if param.grad.grad_fn is not None: + param.grad.detach_() + zero_grads_list.append(param.grad) + if not set_to_none and len(zero_grads_list) > 0: + torch._foreach_zero_(zero_grads_list) def state_dict(self): state_dict = {} @@ -433,6 +512,7 @@ def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, l self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad) if load_optimizer_states: + print(f"_load_legacy_checkpoint current_rank_sd[BASE_OPTIMIZER_STATE]") self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) if load_from_fp32_weights: @@ -445,25 +525,16 @@ def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, l self._link_all_hp_params() def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): - self._load_hp_checkpoint_state(checkpoint_folder) + self.load_hp_checkpoint_state_from_checkpoint_dir("bf16_groups", checkpoint_folder) + + def _load_global_state(self, sd): + pass @property def param_groups(self): """Forward the wrapped optimizer's parameters.""" return self.optimizer.param_groups - def _load_hp_checkpoint_state(self, checkpoint_dir): - checkpoint_dir = os.path.join(checkpoint_dir, "zero") - tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) - tp_world_size = self.mpu.get_slice_parallel_world_size() - - for i, _ in enumerate(self.optimizer.param_groups): - for lp in self.bf16_groups[i]: - if lp._hp_mapping is not None: - #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") - lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, - tp_world_size) - def accumulate_hp_grads_and_remove_lp(self, lp_param, group_idx, param_idx): assert self.immediate_grad_update self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=False) diff --git a/deepspeed/runtime/compiler.py b/deepspeed/runtime/compiler.py index b5e4e33425d0..879c0a1a2cc9 100644 --- a/deepspeed/runtime/compiler.py +++ b/deepspeed/runtime/compiler.py @@ -3,164 +3,14 @@ # DeepSpeed Team -from typing import Union, Callable, Dict, Any -import importlib import torch -from ..pydantic_v1 import validator -from .config_utils import 
DeepSpeedConfigModel - -COMPILE_CONFIG = "compile" def is_compile_supported(): - return hasattr(torch, "compiler") + return hasattr(torch, "compiler") and hasattr(torch.nn.Module, "compile") def disable(func): if is_compile_supported(): return torch.compiler.disable(func) return func - - -def get_compile_config(param_dict): - if COMPILE_CONFIG in param_dict: - compile_config_dict = param_dict[COMPILE_CONFIG] - else: - compile_config_dict = {} - return CompileConfig(**compile_config_dict) - - -def get_backend_fn(backend: Union[str, Callable]) -> Union[str, Callable]: - if isinstance(backend, Callable): - return backend - - elif isinstance(backend, str): - if backend in torch._dynamo.list_backends(exclude_tags=()): - return backend - - # Get module name from backend name - module_name = '.'.join(backend.split('.')[:-1]) - fn_name = backend.split('.')[-1] - - try: - module = importlib.import_module(module_name) - backend_fn = getattr(module, fn_name) - except ImportError: - raise ValueError( - f"The backend {backend} is not in the list of available backends and could not be imported.") - return backend_fn - - raise ValueError(f"backend for torch.compile must be a string or Callable: {backend}") - - -class CompileConfig(DeepSpeedConfigModel): - """ - [EXPERIMENTAL] This configuration enables users to activate `torch.compile` within DeepSpeed and customize its settings. - Please be aware that these features and API designs are experimental and subject to change. - """ - - enabled: bool = False - """ - Enable torch.compile when True. - """ - - backend: str = "inductor" - """ - Passed to `backend` argument of torch.compile. - If the given value is not in torch._dynamo.list_backends(), - DeepSpeed attempts to import and instantiate the module with the given name. - """ - - kwargs: Dict[str, Any] = {} - """ - Passed to `kwargs` argument of torch.compile. - """ - - @validator("enabled") - def validate_enabled(cls, field_value, values): - if field_value and not is_compile_supported(): - raise ValueError("torch.compile is not supported on this version of PyTorch.") - return field_value - - -class CompiledModuleWrapper(torch.nn.Module): - - def __init__(self, module, compile_config: Union[CompileConfig, None] = None): - super().__init__() - - assert is_compile_supported(), "torch.compile is not supported on this version of PyTorch." - - modules = self.__dict__.get('_modules') - modules['wrapped'] = module - self.__dict__['wrapped'] = module - self._is_compiled = False - self._backend = get_backend_fn(compile_config.backend) - self._compile_kwargs = compile_config.kwargs - self._compiler_fn = None - - def __getattr__(self, name): - return getattr(self.__dict__['wrapped'], name) - - def set_backend(self, backend: Union[str, Callable]): - """Set the backend for torch.compile. - - Args: - backend (Union[str, Callable]): backend name or a function that takes a torch.nn.Module and returns a compiled module. - You can directly pass a function that works as a backend. - See also `backend` field in `CompileConfig` for more details. - """ - self._backend = get_backend_fn(backend) - - def set_torch_compile_kwargs(self, kwargs: Dict[str, Union[str, Any]]) -> None: - """Set kwargs for torch.compile. Kwargs that are set in DeepSpeed config will be overwritten. - You can also pass a backend name with "backend" key to change the backend. - - Args: - kwargs (Dict[str, Union[str, Any]]): kwargs passed to torch.compile. - """ - - if "backend" in kwargs: - raise ValueError("backend cannot be set as compile kwargs. 
Use set_backend instead.") - self._compile_kwargs.update(kwargs) - - def set_compiler_fn(self, compiler_fn: Callable) -> None: - """Set a function to be used for compiling the module. - This function should take a torch.nn.Module as input and return a compiled module. - Note that other compile options are ignored when a compiler_fn is set. - - Example: - ```python - def my_compiler_fn(module: torch.nn.Module): - ... - return torch.compile(module, ...) - - engine.set_compiler_fn(my_compiler_fn) - ``` - """ - self._compiler_fn = compiler_fn - - def forward(self, *args, **kwargs) -> Any: - if not self.is_compiled: - if self._compiler_fn is None: - self.__dict__['wrapped'] = torch.compile(self.wrapped, backend=self._backend, **self._compile_kwargs) - else: - self.__dict__['wrapped'] = self._compiler_fn(self.wrapped) - self._is_compiled = True - - return self.__dict__['wrapped'](*args, **kwargs) - - @property - def is_compiled(self) -> bool: - return self._is_compiled - - @property - def backend(self) -> Union[str, Callable]: - return self._backend - - @property - def torch_compile_kwargs(self) -> Dict[str, Any]: - return self._compile_kwargs - - @property - def compiler_fn(self) -> Union[Callable, None]: - return self._compiler_fn diff --git a/deepspeed/runtime/config.py b/deepspeed/runtime/config.py index 975fb1f21501..73567277e2e1 100755 --- a/deepspeed/runtime/config.py +++ b/deepspeed/runtime/config.py @@ -31,7 +31,6 @@ from ..comm.config import DeepSpeedCommsConfig from ..monitor.config import get_monitor_config from ..inference.config import WeightQuantConfig -from .compiler import get_compile_config from deepspeed import comm as dist from deepspeed.runtime.config_utils import DeepSpeedConfigModel @@ -66,6 +65,8 @@ from .data_pipeline.config import get_data_efficiency_enabled, get_data_efficiency_config, get_curriculum_enabled_legacy, get_curriculum_params_legacy from .data_pipeline.constants import * +from ..utils.config import get_timers_config + TENSOR_CORE_ALIGN_SIZE = 8 ADAGRAD_OPTIMIZER = 'adagrad' @@ -169,6 +170,26 @@ def get_bfloat16_enabled(param_dict): return False +def get_fp8_optimizer_enabled(param_dict): + if FP8_OPTIMIZER in param_dict.keys(): + return get_scalar_param(param_dict[FP8_OPTIMIZER], FP8_OPTIMIZER_ENABLED, FP8_OPTIMIZER_ENABLED_DEFAULT) + return FP8_OPTIMIZER_ENABLED_DEFAULT + + +def get_fp8_optimizer_master_weights_dtype(param_dict): + if FP8_OPTIMIZER in param_dict.keys(): + val = get_scalar_param(param_dict[FP8_OPTIMIZER], FP8_OPTIMIZER_MASTER_WEIGHTS_DTYPE, + FP8_OPTIMIZER_MASTER_WEIGHTS_DTYPE_DEFAULT) + if val == "fp32": + return torch.float32 + elif val == "fp16": + return torch.float16 + elif val == "bf16": + return torch.bfloat16 + raise ValueError(f"Invalid master_weights_dtype. Supported data types: ['fp16', 'bfp16', 'fp32']. 
Got: {val}") + return torch.float32 + + def get_bfloat16_immediate_grad_update(param_dict): for key in [BFLOAT16, BFLOAT16_OLD]: if key in param_dict.keys(): @@ -829,6 +850,8 @@ def _initialize_params(self, param_dict): self.bfloat16_immediate_grad_update = get_bfloat16_immediate_grad_update(param_dict) assert not (self.fp16_enabled and self.bfloat16_enabled), 'bfloat16 and fp16 modes cannot be simultaneously enabled' + self.fp8_optimizer_enabled = get_fp8_optimizer_enabled(param_dict) + self.fp8_optimizer_master_weights_dtype = get_fp8_optimizer_master_weights_dtype(param_dict) self.fp16_master_weights_and_gradients = get_fp16_master_weights_and_grads_enabled(param_dict) self.amp_enabled = get_amp_enabled(param_dict) self.amp_params = get_amp_params(param_dict) @@ -909,7 +932,7 @@ def _initialize_params(self, param_dict): self.weight_quantization_config = WeightQuantConfig( **param_dict['weight_quantization']) if 'weight_quantization' in param_dict else None - self.compile_config = get_compile_config(param_dict) + self.timers_config = get_timers_config(param_dict) def _batch_assertion(self): diff --git a/deepspeed/runtime/constants.py b/deepspeed/runtime/constants.py index 679230ca7d4c..e9fa9f131c1e 100755 --- a/deepspeed/runtime/constants.py +++ b/deepspeed/runtime/constants.py @@ -130,6 +130,25 @@ BFLOAT16_IMMEDIATE_GRAD_UPDATE = "immediate_grad_update" BFLOAT16_IMMEDIATE_GRAD_UPDATE_DEFAULT = False +######################################### +# FP8 optimizer support +######################################### +# By default, this feature is not enabled. +# Users can configure in ds_config.json as below example: +FP8_FORMAT = ''' +FP8 parameters should be of the format: +"fp8_optimizer": { + "enabled": true +} +''' +FP8_OPTIMIZER = "fp8_optimizer" + +FP8_OPTIMIZER_ENABLED = "enabled" +FP8_OPTIMIZER_ENABLED_DEFAULT = False + +FP8_OPTIMIZER_MASTER_WEIGHTS_DTYPE = "master_weights_dtype" +FP8_OPTIMIZER_MASTER_WEIGHTS_DTYPE_DEFAULT = "fp32" + ######################################### # FP16 support ######################################### diff --git a/deepspeed/runtime/engine.py b/deepspeed/runtime/engine.py index 5c1202ba06ae..1a1c5c6586bc 100644 --- a/deepspeed/runtime/engine.py +++ b/deepspeed/runtime/engine.py @@ -34,6 +34,7 @@ from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer from deepspeed.runtime.bf16_optimizer import BF16_Optimizer +from deepspeed.runtime.fp8_optimizer import FP8_Optimizer from deepspeed.runtime.config import DEEPSPEED_OPTIMIZERS, \ ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, \ @@ -90,15 +91,17 @@ from .pipe.module import PipelineModule from .utils import get_ma_status -from .compiler import CompiledModuleWrapper +from .compiler import is_compile_supported from ..ops.adam import FusedAdam from ..moe.sharded_moe import TopKGate, MOELayer +from ..moe.capacity_bins import optimize_bins from ..moe.layer import MoE from ..moe.utils import is_moe_param from ..git_version_info import version from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler from deepspeed.utils.logging import print_json_dist, print_configuration +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank from deepspeed.accelerator import get_accelerator @@ -222,6 +225,7 @@ def __init__(self, self.num_experts = [] self.gate_modules = [] self.moe_layers = [] + self.has_sequence_parallel_params = False self._step_applied = False 
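For reference, the "fp8_optimizer" section consumed by get_fp8_optimizer_enabled / get_fp8_optimizer_master_weights_dtype above takes the following shape; a minimal sketch written as a Python dict, where the batch-size value is a placeholder for illustration and not prescribed by this patch:

    ds_config = {
        "train_batch_size": 8,  # placeholder value for illustration only
        "fp8_optimizer": {
            "enabled": True,
            "master_weights_dtype": "bf16"  # accepts "fp16", "bf16" or "fp32"; defaults to "fp32"
        },
    }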
self._global_grad_norm = None self.use_ds_comm = False # False --> Use torch.dist, True --> Use ds.comm backend. @@ -239,6 +243,7 @@ def __init__(self, self._configure_with_arguments(args, mpu) self._do_sanity_check() see_memory_usage(f"DeepSpeed Engine: After args sanity test", force=self.memory_breakdown()) + if mpu is not None: if self.elasticity_enabled(): if not self.is_elastic_model_parallel_supported(): @@ -249,6 +254,7 @@ def __init__(self, dist.configure(self._config) + self.fp8_optimizer = self._config.fp8_optimizer_enabled self.monitor = MonitorMaster(self._config.monitor_config) see_memory_usage( @@ -271,11 +277,10 @@ def __init__(self, # Configure wall clock timers self.timers = SynchronizedWallClockTimer() # Throughput timer - self.tput_timer = ThroughputTimer( - batch_size=self.train_batch_size(), - steps_per_output=self.steps_per_print(), - monitor_memory=False, - ) + self.tput_timer = ThroughputTimer(self._config.timers_config, + batch_size=self.train_batch_size(), + steps_per_output=self.steps_per_print(), + monitor_memory=False) log_dist(f"DeepSpeed Flops Profiler Enabled: {self.flops_profiler_enabled()}", ranks=[0]) @@ -313,6 +318,14 @@ def __init__(self, elif self.bfloat16_enabled(): self.optimizer = self._configure_bf16_optimizer(optimizer=None) + #Sequence parallel related initialization + for param in self.module.parameters(): + if getattr(param, 'sequence_parallel', False): + self.has_sequence_parallel_params = True + break + if self.has_sequence_parallel_params: + assert self.mpu is not None, "sequence parallel allreduce only supported with tensor parallel enabled" + # Hook optimizer for snip_momentum pruning if hasattr(model, 'pruners'): from ..compression.helper import rewrite_optimizer_step @@ -362,8 +375,7 @@ def __init__(self, self.flatten = _flatten_dense_tensors self.unflatten = _unflatten_dense_tensors - if self._config.compile_config.enabled: - self._set_client_model(CompiledModuleWrapper(self.module, self._config.compile_config)) + self._is_compiled = False def destroy(self): if self.optimizer is not None and hasattr(self.optimizer, 'destroy'): @@ -455,7 +467,10 @@ def get_global_grad_norm(self) -> float: Returns: float: norm """ - return self._global_grad_norm + grad_norm = self._global_grad_norm + if isinstance(grad_norm, torch.Tensor): + grad_norm = grad_norm.item() + return grad_norm def __getattr__(self, name): """ @@ -469,13 +484,6 @@ def __getattr__(self, name): return getattr(self, name) elif name in dir(_module): return getattr(_module, name) - elif isinstance(_module, CompiledModuleWrapper): - try: - return getattr(_module, name) - except AttributeError: - raise AttributeError( - f"None of {type(self).__name__}, CompiledModuleWrapper, or the wrapped model has the attribute '{name}'" - ) else: raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") @@ -977,13 +985,13 @@ def _set_distributed_vars(self, args): device_rank = args.device_rank if args is not None and hasattr(args, 'device_rank') else self.local_rank if device_rank >= 0: get_accelerator().set_device(device_rank) - self.device = torch.device(get_accelerator().device_name(), device_rank) + self.device = torch.device(get_accelerator().device_name(device_rank)) self.world_size = dist.get_world_size() self.global_rank = dist.get_rank() else: self.world_size = 1 self.global_rank = 0 - self.device = torch.device(get_accelerator().device_name()) + self.device = get_accelerator().device() # Configure based on command line arguments def _configure_with_arguments(self, 
args, mpu): @@ -1254,6 +1262,8 @@ def _configure_optimizer(self, client_optimizer, model_parameters): if optimizer_wrapper == ZERO_OPTIMIZATION: self.optimizer = self._configure_zero_optimizer(basic_optimizer) + elif self.fp8_optimizer: + self.optimizer = self._configure_fp8_optimizer(basic_optimizer) elif optimizer_wrapper == AMP: amp_params = self.amp_params() log_dist(f"Initializing AMP with these params: {amp_params}", ranks=[0]) @@ -1268,7 +1278,7 @@ def _configure_optimizer(self, client_optimizer, model_parameters): else: self.optimizer = basic_optimizer - log_dist("DeepSpeed Final Optimizer = {}".format(self.optimizer_name()), ranks=[0]) + log_dist("DeepSpeed Final Optimizer = {}".format(self.optimizer.__class__.__name__), ranks=[0]) self.compression_scheduler = self._configure_compression_scheduler() self.quantizer = self._configure_quantization() @@ -1478,7 +1488,30 @@ def _configure_bf16_optimizer(self, optimizer): timers=timers, grad_acc_dtype=self.get_data_types()[1], graph_harvesting=self.graph_harvesting(), - immediate_grad_update=self._config.bfloat16_immediate_grad_update) + immediate_grad_update=self._config.bfloat16_immediate_grad_update, + has_moe_layers=self.has_moe_layers) + + return optimizer + + def _configure_fp8_optimizer(self, optimizer): + clip_grad = self.gradient_clipping() + + if optimizer is None: + optimizer = DummyOptim(list(self.module.parameters())) + + log_dist('Creating FP8 optimizer', ranks=[0]) + + timers = self.timers if self.wall_clock_breakdown() else NoopTimer() + optimizer = FP8_Optimizer(optimizer, + self.param_names, + mpu=self.mpu, + clip_grad=clip_grad, + allgather_bucket_size=self.zero_allgather_bucket_size(), + dp_process_group=self.seq_data_parallel_group, + timers=timers, + grad_acc_dtype=self.get_data_types()[1], + immediate_grad_update=self._config.bfloat16_immediate_grad_update, + master_weights_dtype=self._config.fp8_optimizer_master_weights_dtype) return optimizer @@ -1924,9 +1957,6 @@ def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE): self.optimizer.reduce_gradients(pipeline_parallel=self.pipeline_parallelism) else: grads = None - if hasattr(self.optimizer, "get_grads_for_reduction"): - # This is currently for BF16 optimizer - grads = self.optimizer.get_grads_for_reduction() self.buffered_allreduce_fallback(grads=grads, elements_per_buffer=bucket_size) @instrument_w_nvtx @@ -2335,7 +2365,7 @@ def _report_progress(self, step): mom = self.get_mom() log_dist(f"step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}", ranks=[0]) - def allreduce_bucket(self, bucket, dp_group): + def allreduce_bucket(self, bucket, dp_group, dp_world_size=None): tensor = self.flatten(bucket) tensor_to_allreduce = tensor @@ -2343,16 +2373,18 @@ def allreduce_bucket(self, bucket, dp_group): if self.communication_data_type != tensor.dtype: tensor_to_allreduce = tensor.to(self.communication_data_type) + if dp_world_size is None: + dp_world_size = dist.get_world_size(group=dp_group) if self.postscale_gradients(): if self.gradient_predivide_factor() != 1.0: tensor_to_allreduce.mul_(1.0 / self.gradient_predivide_factor()) dist.all_reduce(tensor_to_allreduce, group=dp_group) if self.gradient_average: - if self.gradient_predivide_factor() != dist.get_world_size(group=dp_group): - tensor_to_allreduce.mul_(self.gradient_predivide_factor() / dist.get_world_size(group=dp_group)) + if self.gradient_predivide_factor() != dp_world_size: + tensor_to_allreduce.mul_(self.gradient_predivide_factor() / dp_world_size) else: - tensor_to_allreduce.mul_(1. 
/ dist.get_world_size(group=dp_group)) + tensor_to_allreduce.mul_(1. / dp_world_size) dist.all_reduce(tensor_to_allreduce, group=dp_group) if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: @@ -2360,23 +2392,23 @@ def allreduce_bucket(self, bucket, dp_group): return tensor - def allreduce_and_copy(self, small_bucket, dp_group): - allreduced = self.allreduce_bucket(small_bucket, dp_group) + def allreduce_and_copy(self, small_bucket, dp_group, dp_world_size=None): + allreduced = self.allreduce_bucket(small_bucket, dp_group, dp_world_size) for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): buf.copy_(synced) - def allreduce_no_retain(self, bucket, dp_group, numel_per_bucket=500000000): + def allreduce_no_retain(self, bucket, dp_group, numel_per_bucket=500000000, dp_world_size=None): small_bucket = [] numel = 0 for tensor in bucket: small_bucket.append(tensor) numel = numel + tensor.numel() if numel > numel_per_bucket: - self.allreduce_and_copy(small_bucket, dp_group) + self.allreduce_and_copy(small_bucket, dp_group, dp_world_size) small_bucket = [] numel = 0 if len(small_bucket) > 0: - self.allreduce_and_copy(small_bucket, dp_group) + self.allreduce_and_copy(small_bucket, dp_group, dp_world_size) def _get_gradients_for_reduction(self): non_expert_grads = [] @@ -2427,26 +2459,35 @@ def _reduce_non_expert_gradients(self, grads, elements_per_buffer): self.allreduce_no_retain(dense_bucket, dp_group=dp_group, numel_per_bucket=elements_per_buffer) def _reduce_expert_gradients(self, expert_grads, elements_per_buffer): + # to maintain the gradients value unaffected by ep_size setting, + # utilize dp_world_size for allreduce average + dp_world_size = dist.get_world_size(groups._get_data_parallel_group()) for ep_name, expert_grads_group in expert_grads.items(): + ep_dp_group = groups._get_expert_data_parallel_group(ep_name) split_sparse_tensor_buckets, split_dense_tensor_buckets = split_half_float_double_sparse( expert_grads_group) for _, sparse_bucket_tuple in enumerate(split_sparse_tensor_buckets): if sparse_bucket_tuple: bucket_type, sparse_bucket = sparse_bucket_tuple - self.sparse_allreduce_no_retain(sparse_bucket, groups._get_expert_data_parallel_group(ep_name)) + self.sparse_allreduce_no_retain(sparse_bucket, dp_group=ep_dp_group, dp_world_size=dp_world_size) for _, dense_bucket_tuple in enumerate(split_dense_tensor_buckets): if dense_bucket_tuple: bucket_type, dense_bucket = dense_bucket_tuple # Separate between diff groups self.allreduce_no_retain(dense_bucket, - dp_group=groups._get_expert_data_parallel_group(ep_name), - numel_per_bucket=elements_per_buffer) + dp_group=ep_dp_group, + numel_per_bucket=elements_per_buffer, + dp_world_size=dp_world_size) def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000): if grads is None: - non_expert_grads, expert_grads = self._get_gradients_for_reduction() + if hasattr(self.optimizer, "get_grads_for_reduction"): + # This is currently for BF16 optimizer + non_expert_grads, expert_grads = self.optimizer.get_grads_for_reduction() + else: + non_expert_grads, expert_grads = self._get_gradients_for_reduction() else: assert not self.has_moe_layers, "attempting to reduce grads in unsupported way w.r.t. 
MoE" non_expert_grads = grads @@ -2456,8 +2497,19 @@ def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000) if self.has_moe_layers: self._reduce_expert_gradients(expert_grads, elements_per_buffer) - def sparse_allreduce_no_retain(self, bucket, dp_group): - allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group) + if self.has_sequence_parallel_params: + for i, group in enumerate(self.optimizer.bf16_groups): + if group is None: + dtypes = self.optimizer.optimizer.param_groups[i]['group_dtypes'] + group = self.optimizer.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + if getattr(lp, 'sequence_parallel', False): + dist.all_reduce(self.optimizer.fp32_groups_gradients[i][j], + op=dist.ReduceOp.SUM, + group=self.mpu.get_slice_parallel_group()) + + def sparse_allreduce_no_retain(self, bucket, dp_group, dp_world_size=None): + allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group, dp_world_size) # Densify sparse tensor and copy back to original location for tensor in allreduced_sparses: if tensor.is_sparse: @@ -2465,13 +2517,13 @@ def sparse_allreduce_no_retain(self, bucket, dp_group): else: tensor.orig_dense_tensor.copy_(tensor.to_dense()) - def sparse_allreduce_bucket(self, bucket, dp_group): + def sparse_allreduce_bucket(self, bucket, dp_group, dp_world_size=None): sparse_list = [] for sparse in bucket: - sparse_list.append(self.sparse_allreduce(sparse, dp_group)) + sparse_list.append(self.sparse_allreduce(sparse, dp_group, dp_world_size)) return sparse_list - def sparse_allreduce(self, sparse, dp_group): + def sparse_allreduce(self, sparse, dp_group, dp_world_size=None): original_data_type = sparse.values.dtype if self.communication_data_type != sparse.values.dtype: if self.communication_data_type in (torch.float16, torch.bfloat16): @@ -2483,12 +2535,13 @@ def sparse_allreduce(self, sparse, dp_group): indices = sparse.indices values = sparse.values + if dp_world_size is None: + dp_world_size = dist.get_world_size(group=dp_group) if self.postscale_gradients(): if self.gradient_average: - values.mul_(self.gradient_predivide_factor() / - (dist.get_world_size(group=dp_group) / float(self.sequence_parallel_size))) + values.mul_(self.gradient_predivide_factor() / (dp_world_size / float(self.sequence_parallel_size))) else: - values.mul_(1. / (dist.get_world_size(group=dp_group) / float(self.sequence_parallel_size))) + values.mul_(1. 
/ (dp_world_size / float(self.sequence_parallel_size))) indices_device_list = self.sparse_all_gather(indices, dp_group) values_device_list = self.sparse_all_gather(values, dp_group) @@ -2759,7 +2812,7 @@ def load_checkpoint(self, load_zero_checkpoint = load_path is not None and (self.zero_optimization() or self.bfloat16_enabled()) if load_zero_checkpoint: - if load_optimizer_states and not load_module_only: + if (load_optimizer_states and not load_module_only) or self.load_universal_checkpoint(): success = self._load_zero_checkpoint(load_dir, tag, load_optimizer_states=load_optimizer_states) else: success = False @@ -2784,8 +2837,6 @@ def load_checkpoint(self, if self.load_universal_checkpoint(): self.optimizer.update_lp_params() - if load_zero_checkpoint: - self.update_optimizer_step(step=client_states['iteration'] + 1) return load_path, client_states @@ -2963,24 +3014,6 @@ def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True): logger.info(f"loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}") return True - def update_optimizer_step(self, step): - - def set_step(d): - if isinstance(d['step'], torch.Tensor): - d['step'] = torch.tensor(step, dtype=d['step'].dtype, device=d['step'].device) - else: - d['step'] = step - - optimizer = self.optimizer - base_optimizer = optimizer.optimizer - state = base_optimizer.state - for group in optimizer.param_groups: - if 'step' in group: - set_step(group) - for p in group['params']: - if p in state and len(state[p]) > 0 and 'step' in state[p]: - set_step(state[p]) - def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size, bf16_mode): zero_ckpt_names = [] for dp_rank in range(dp_world_size): @@ -3236,9 +3269,12 @@ def _save_moe_checkpoint(self, save_dir, tag, client_state={}, exclude_frozen_pa # Load flow uses below saved file for model parameters, RNG and more if groups._get_data_parallel_rank() == 0: - # get non-moe parameters + # Get non-moe parameters + # Classes DeepSpeedEngine and PipelineEngine have different behavior for method module_state_dict. + # DeepSpeedEngine returns the state dict, where PipelineEngine saves the state dict and returns None. + # We need to get the state dict, therefore, call to DeepSpeedEngine (base class for PipelineEngine) model_state_dict = self._get_non_moe_state_dict( - self.module_state_dict(exclude_frozen_parameters=exclude_frozen_parameters)) + DeepSpeedEngine.module_state_dict(self, exclude_frozen_parameters=exclude_frozen_parameters)) # TODO: update num experts info,.. in checkpoint state = { @@ -3613,3 +3649,101 @@ def empty_partition_cache(self): self.optimizer.empty_partition_cache() gc.collect() get_accelerator().empty_cache() + + def compile(self, backend=get_accelerator().get_compile_backend(), compile_kwargs={}) -> None: + """Compile the module using the specified backend and kwargs. + If a compiler_fn is set, it will be used instead of torch.compile(). + """ + if not is_compile_supported(): + raise RuntimeError("compile is not supported in your version of PyTorch.") + + if self.is_compiled: + return + + self.module.compile(backend=backend, **compile_kwargs) + self._is_compiled = True + + @property + def is_compiled(self) -> bool: + return self._is_compiled + + def optimize_moe(self, step, max_grouped_experts=1): + """ Optimize MoE gate capacity bins + + If MoE is using capacity bins, optimize the bins based on running stats. + In order to reduce the number of compilation recipes, we optimize a set + of grouped gates together. 
+ The grouped gates must have same number of experts. + """ + if not self.has_moe_layers: + return + + # find all gates with capacity factor + gate_with_capacity_bins_idx = [i for i, gate in enumerate(self.gate_modules) if gate.has_capacity_bins()] + if len(gate_with_capacity_bins_idx) == 0: + return + + # handle only gates have capacity bins usage statistics + gate_capacity_bin_stats = OrderedDict() + for i in gate_with_capacity_bins_idx: + gate = self.gate_modules[i] + if hasattr(gate, 'get_stats'): + stats = gate.get_stats(incremental=False) + if stats is not None and 'capacity_bins' in stats: + gate_capacity_bin_stats[i] = stats['capacity_bins'] + if len(gate_capacity_bin_stats) == 0: + return + + del gate_with_capacity_bins_idx # removing the list because it is out of date + + # divide gates into groups up to max_grouped_experts or until different num_experts encountered + gate_groups = [] + first_gate_idx = list(gate_capacity_bin_stats.keys())[0] + current_group = [first_gate_idx] + current_group_n_experts = self.num_experts[first_gate_idx] + for i in list(gate_capacity_bin_stats.keys())[1:]: + if self.num_experts[i] == current_group_n_experts and len(current_group) < max_grouped_experts: + current_group.append(i) + else: + gate_groups.append(current_group) + current_group = [i] + current_group_n_experts = self.num_experts[i] + gate_groups.append(current_group) + + # print new optimized groups for each pipeline stage (no sharing across pp stages) + dp_rank = dist.get_rank(group=self.mpu.get_data_parallel_group()) + tp_rank = bwc_tensor_model_parallel_rank(self.mpu) + log_ranks = [self.global_rank] if dp_rank == 0 and tp_rank == 0 else [] + + # for each group, (1) accumulate stats (2) calculate optimized capacity and (3) reconfigure bins + for gate_group in gate_groups: + group_stats = [] + for i in gate_group: + group_stats.append(gate_capacity_bin_stats[i]) + + # sanity - verify all gates in groups have same bins edges + bins_edges = [stats['edges'] for stats in group_stats] + same_edges = all(torch.equal(bins_edges[0], tensor) for tensor in bins_edges[1:]) + assert same_edges, f'Got different capacity bin edges for group={gate_group} edges={bins_edges}' + + # accumulate usage + stacked_usage = torch.stack([stats['usage'] for stats in group_stats], dim=0) + total_group_usage = torch.sum(stacked_usage, dim=0) + + # find optimized bins for this group + min_range = group_stats[0]['min_range'] + current_bins = group_stats[0]['edges'] + alignment = group_stats[0]['alignment'] + min_bin_size = group_stats[0]['min_bin_size'] + new_bins = optimize_bins(min_range=min_range, + bins=current_bins, + bins_usage=total_group_usage, + alignment=alignment, + min_bin_size=min_bin_size) + + # configure gates in group with new bins + for i in gate_group: + gate = self.gate_modules[i] + capacity_bins = gate.get_capacity_bins() + capacity_bins.set_bins(new_bins) + log_dist(f'step={step}, optimize capacity bins for group={gate_group} bins={new_bins}', ranks=log_ranks) diff --git a/deepspeed/runtime/fp16/fused_optimizer.py b/deepspeed/runtime/fp16/fused_optimizer.py index 182f806c839c..9ed250252e17 100755 --- a/deepspeed/runtime/fp16/fused_optimizer.py +++ b/deepspeed/runtime/fp16/fused_optimizer.py @@ -10,13 +10,13 @@ import torch from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors -from deepspeed.runtime import DeepSpeedOptimizer -from deepspeed.runtime.utils import get_global_norm, get_grad_norm, CheckOverflow, get_weight_norm, required_torch_version +from 
deepspeed.runtime.base_optimizer import DeepSpeedOptimizer +from deepspeed.runtime.utils import get_global_norm, get_grad_norm, CheckOverflow, get_weight_norm, required_torch_version, get_norm_with_moe_layers from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE -from deepspeed.utils import groups, logger, log_dist -from deepspeed import comm as dist +from deepspeed.utils import logger, log_dist from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, CLIP_GRAD from deepspeed.accelerator import get_accelerator +from deepspeed.moe.utils import is_moe_param_group OVERFLOW_CHECK_TIMER = 'overflow_check' COMPUTE_NORM_TIMER = 'compute_norm' @@ -237,6 +237,10 @@ def step(self, closure=None): return self.overflow grads_groups_flat = [] + non_experts_grads_for_norm = [] + expert_grads_for_norm = {} + assert len(self.fp16_groups) == len(self.optimizer.param_groups) + for i, group in enumerate(self.fp16_groups): data_type = self.fp32_groups_flat[i].dtype @@ -250,15 +254,25 @@ def step(self, closure=None): p.grad = None self.fp32_groups_flat[i].grad = grads_groups_flat[i] + param_group = self.optimizer.param_groups[i] + if self.has_moe_layers and is_moe_param_group(param_group): + if param_group['name'] not in expert_grads_for_norm: + expert_grads_for_norm[param_group['name']] = [] + expert_grads_for_norm[param_group['name']].append(self.fp32_groups_flat[i]) + else: + non_experts_grads_for_norm.append(self.fp32_groups_flat[i]) self.timers(COMPUTE_NORM_TIMER).start() - all_groups_norm = get_grad_norm(self.fp32_groups_flat, mpu=self.mpu) + all_groups_norm = get_grad_norm(non_experts_grads_for_norm, mpu=self.mpu) self.timers(COMPUTE_NORM_TIMER).stop() if self.has_moe_layers: - all_groups_norm = self._get_norm_with_moe_layers(all_groups_norm) + all_groups_norm = get_norm_with_moe_layers(all_groups_norm, + mpu=self.mpu, + expert_tensors=expert_grads_for_norm, + norm_type=self.norm_type) scaled_global_grad_norm = get_global_norm(norm_list=[all_groups_norm]) @@ -290,20 +304,6 @@ def step(self, closure=None): return self.overflow - def _get_norm_with_moe_layers(self, all_groups_norm): - #all_groups_norm_old = all_groups_norm - # Need to allreduce (avg) the norms across different ranks because moe params will not be synced during allreduce - if self.using_pipeline: - pg = self.deepspeed.mpu.get_data_parallel_group() - else: - pg = groups._get_data_parallel_group() - scaled_norm = all_groups_norm * 1.0 / float(dist.get_world_size(group=pg)) - scaled_norm_tensor = torch.tensor(scaled_norm, device=self.fp32_groups_flat[0].device, dtype=torch.float) - dist.all_reduce(scaled_norm_tensor, group=pg) - all_groups_norm = scaled_norm_tensor.item() - #print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}") - return all_groups_norm - def unscale_and_clip_grads(self, grad_groups_flat, total_norm, apply_scale=True): # compute combined scale factor for this group combined_scale = self.cur_scale diff --git a/deepspeed/runtime/fp16/unfused_optimizer.py b/deepspeed/runtime/fp16/unfused_optimizer.py index 14271255df2e..a7fd1910d7b2 100755 --- a/deepspeed/runtime/fp16/unfused_optimizer.py +++ b/deepspeed/runtime/fp16/unfused_optimizer.py @@ -11,7 +11,7 @@ import torch from torch._utils import _flatten_dense_tensors -from deepspeed.runtime import DeepSpeedOptimizer +from deepspeed.runtime.base_optimizer import DeepSpeedOptimizer from deepspeed.runtime.utils import get_global_norm, CheckOverflow, get_weight_norm, required_torch_version 
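# Note on the expert/non-expert norm split introduced in fused_optimizer.step() above:
# for the default L2 norm, a combined global norm over disjoint parameter groups is the
# square root of the sum of squared per-group norms, e.g.
#     sqrt(non_expert_norm**2 + sum(n**2 for n in expert_group_norms))
# (illustrative identity only; how the expert norms are reduced across ranks is internal
# to get_norm_with_moe_layers and is not shown in this diff).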
from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE from deepspeed.utils import logger diff --git a/deepspeed/runtime/fp8_optimizer.py b/deepspeed/runtime/fp8_optimizer.py new file mode 100644 index 000000000000..bcafe7cbf8c4 --- /dev/null +++ b/deepspeed/runtime/fp8_optimizer.py @@ -0,0 +1,768 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from collections import OrderedDict +import torch +import sys +import os +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from deepspeed import comm as dist +from deepspeed.runtime.constants import PIPE_REPLICATED +from deepspeed.runtime.base_optimizer import ZeROOptimizer +from packaging import version as pkg_version +from deepspeed.git_version_info import version +from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim, + align_dense_tensors, all_gather_dp_groups, is_model_parallel_parameter, + see_memory_usage, graph_process, get_norm_with_moe_layers) +from deepspeed.utils import link_hp_params, lazy_init_hp_params_optimizer_state, fragment_address, groups, map_to_flat_opt_states +from deepspeed.moe.utils import is_moe_param, is_moe_param_group +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank +from deepspeed.checkpoint import enable_universal_checkpoint +from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE, + SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS, + PARAM_SLICE_MAPPINGS) +from deepspeed.accelerator import get_accelerator + +setattr(sys.modules[__name__], 'fragment_address', fragment_address) + + +class FP8_Optimizer(ZeROOptimizer): + + def __init__(self, + init_optimizer, + param_names, + mpu=None, + clip_grad=0.0, + norm_type=2, + allgather_bucket_size=5000000000, + dp_process_group=None, + timers=None, + grad_acc_dtype=None, + graph_harvesting=False, + immediate_grad_update=False, + has_moe_layers=False, + master_weights_dtype=None): + super().__init__() + see_memory_usage('begin fp8_optimizer', force=True) + self.timers = timers + self.optimizer = init_optimizer + self.param_names = param_names + self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim) + + assert grad_acc_dtype in [torch.float32, torch.bfloat16 + ], f"FP8_Optimizer: Unsupported gradient accumulation data type: {grad_acc_dtype}" + self.grad_acc_dtype = grad_acc_dtype + self.immediate_grad_update = immediate_grad_update + + self.clip_grad = clip_grad + self.norm_type = norm_type + self.mpu = mpu + self.allgather_bucket_size = int(allgather_bucket_size) + self.dp_process_group = dp_process_group + self.dp_rank = dist.get_rank(group=self.dp_process_group) + self.has_moe_layers = has_moe_layers + self.non_expert_gradients = [] + self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))] + if self.has_moe_layers: + self._configure_moe_settings() + self.master_weights_dtype = master_weights_dtype + + # Use torch (un)flatten ops + self.flatten = _flatten_dense_tensors + self.unflatten = _unflatten_dense_tensors + + #align nccl all-gather send buffers to 4-bye boundary + self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2 + + # Build BF16/FP32 groups + self.bf16_groups = [] + self.bf16_groups_flat = [] + self.bf16_partitioned_groups = [] + + self.fp8_param_groups_dict = {torch.float8_e5m2: {"param_groups": [], 
"param_groups_flat": [], "param_partitioned_groups": []},\ + torch.float8_e4m3fn: {"param_groups": [], "param_groups_flat": [], "param_partitioned_groups": []}} + + self.fp32_groups_flat_partition = [] + + # Maintain different fp32 gradients views for convenience + self.fp32_groups_gradients = [] + self.fp32_groups_gradient_dict = {} + self.fp32_groups_gradients_flat = [] + self.fp32_groups_actual_gradients_flat = [] + self.fp32_groups_gradient_flat_partition = [] + self.fp32_groups_has_gradients = [] + + self.group_paddings = [] + self.graph_harvesting = graph_harvesting + if self.using_real_optimizer: + self._setup_for_real_optimizer() + + see_memory_usage('end fp8_optimizer', force=True) + + def _configure_moe_settings(self): + assert any( + [is_moe_param_group(group) for group in self.optimizer.param_groups] + ), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer" + + for i, group in enumerate(self.optimizer.param_groups): + if is_moe_param_group(group): + assert all([is_moe_param(param) + for param in group['params']]), "All params in MoE group must be MoE params" + self.real_dp_process_group[i] = groups._get_expert_data_parallel_group(group['name']) + self.expert_gradients = {} + if self.has_moe_layers: + for key in groups._get_expert_data_parallel_group_dict().keys(): + self.expert_gradients[key] = [] + + def _setup_for_real_optimizer(self): + self.partition_count = [dist.get_world_size(group=pg) for pg in self.real_dp_process_group] + + for i, param_group in enumerate(self.optimizer.param_groups): + real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i]) + see_memory_usage(f'before initializing group {i}', force=True) + + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + + # grab the original list + trainable_parameters = [param for param in param_group['params'] if param.requires_grad] + if param_group.get('group_dtypes'): + self.bf16_groups.append(None) + self.bf16_groups_flat.append(None) + self.bf16_partitioned_groups.append(None) + self._setup_for_fp8_params(real_dp_world_size, i, param_group, partition_id, trainable_parameters) + else: + for dtype in self.fp8_param_groups_dict: + self.fp8_param_groups_dict[dtype]["param_groups"].append(None) + self.fp8_param_groups_dict[dtype]["param_groups_flat"].append(None) + self.fp8_param_groups_dict[dtype]["param_partitioned_groups"].append(None) + self._setup_for_bf16_params(real_dp_world_size, i, param_group, partition_id, trainable_parameters) + + # update optimizer param groups to reference fp32 params partition + param_group['params'] = [self.fp32_groups_flat_partition[i]] + + see_memory_usage(f'after initializing group {i}', force=True) + + see_memory_usage('before initialize_optimizer', force=True) + self.initialize_optimizer_states() + see_memory_usage('end initialize_optimizer', force=True) + + if self.immediate_grad_update: + self.create_grad_acc_hooks() + + # Need optimizer states initialized before linking lp to optimizer state + self._link_all_hp_params() + self._hp_optimizer_states_linked = False + self._enable_universal_checkpoint() + self._param_slice_mappings = self._create_param_mapping() + + def _setup_for_bf16_params(self, real_dp_world_size, i, param_group, partition_id, trainable_parameters): + self.bf16_groups.append(trainable_parameters) + + # create flat bf16 params + self.bf16_groups_flat.append( + self._flatten_dense_tensors_aligned(self.bf16_groups[i], + 
self.nccl_start_alignment_factor * real_dp_world_size)) + # Make bf16 params point to flat tensor storage + self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i], flat_tensor=self.bf16_groups_flat[i]) + + # divide flat weights into equal sized partitions + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size + bf16_dp_partitions = [ + self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size) + for dp_index in range(real_dp_world_size) + ] + self.bf16_partitioned_groups.append(bf16_dp_partitions) + + # create fp32 params partition + self.fp32_groups_flat_partition.append(bf16_dp_partitions[partition_id].clone().to( + self.master_weights_dtype).detach()) + self.fp32_groups_flat_partition[i].requires_grad = True + + num_elem_list = [t.numel() for t in self.bf16_groups[i]] + + # create fp32 gradients + fp32_flat_buffer = torch.zeros_like(self.bf16_groups_flat[i], dtype=self.grad_acc_dtype) + self.fp32_groups_gradients_flat.append(fp32_flat_buffer) + if self.has_moe_layers and is_moe_param_group(param_group): + self.expert_gradients[param_group['name']].append(fp32_flat_buffer) + else: + self.non_expert_gradients.append(fp32_flat_buffer) + + # track individual fp32 gradients for entire model + fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i], + num_elem_list=num_elem_list) + self.fp32_groups_gradients.append(fp32_gradients) + self.fp32_groups_gradient_dict[i] = fp32_gradients + + # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding) + length_without_padding = sum(num_elem_list) + self.fp32_groups_actual_gradients_flat.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding)) + + # flat tensor corresponding to gradient partition + self.fp32_groups_gradient_flat_partition.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size)) + + # track fp32 gradient updates + self.fp32_groups_has_gradients.append([False] * len(self.bf16_groups[i])) + + # Record padding required for alignment + if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: + padding = self.bf16_groups_flat[i].numel() - length_without_padding + else: + padding = 0 + + self.group_paddings.append(padding) + + def _setup_for_fp8_params(self, real_dp_world_size, i, param_group, partition_id, trainable_parameters): + assert param_group.get('calculate_statistics_fn') is not None, \ + "calculate_statistics_fn wasn't provided" + dtypes = param_group['group_dtypes'] + for dtype in dtypes: + assert dtype in [torch.float8_e5m2, torch.float8_e4m3fn], \ + f'{dtype} is not supported' + calculate_statistics_fn = param_group['calculate_statistics_fn'] + calculate_statistics_fn(trainable_parameters) + + for p in trainable_parameters: + p.all_data = {} + p.all_data[dtypes[0]] = p.cast_to(dtypes[0], p.data) + for j in range(1, len(dtypes)): + self.fp8_param_groups_dict[dtypes[j]]["param_groups"].append([]) + for p in trainable_parameters: + p.all_data[dtypes[j]] = p.cast_to(dtypes[j], p.data) + self.fp8_param_groups_dict[dtypes[j]]["param_groups"][i].append(p.all_data[dtypes[j]]) + for p in trainable_parameters: + p.data = p.all_data[dtypes[0]] + self.fp8_param_groups_dict[dtypes[0]]["param_groups"].append(trainable_parameters) + + # create flat fp8 params + first_iter = True + for dtype in dtypes: + self.fp8_param_groups_dict[dtype]["param_groups_flat"].append( + 
self._flatten_dense_tensors_aligned(self.fp8_param_groups_dict[dtype]["param_groups"][i], + self.nccl_start_alignment_factor * real_dp_world_size)) + # Make fp8 params point to flat tensor storage + self._update_storage_to_flattened_tensor( + tensor_list=self.fp8_param_groups_dict[dtype]["param_groups"][i], + flat_tensor=self.fp8_param_groups_dict[dtype]["param_groups_flat"][i]) + # divide flat weights into equal sized partitions + partition_size = self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].numel() // real_dp_world_size + fp8_dp_partitions = [ + self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].narrow(0, dp_index * partition_size, + partition_size) + for dp_index in range(real_dp_world_size) + ] + self.fp8_param_groups_dict[dtype]["param_partitioned_groups"].append(fp8_dp_partitions) + if first_iter: + # create fp32 params partition + self.fp32_groups_flat_partition.append(fp8_dp_partitions[partition_id].clone().to( + self.master_weights_dtype).detach()) + self.fp32_groups_flat_partition[i].requires_grad = True + + num_elem_list = [t.numel() for t in self.fp8_param_groups_dict[dtype]["param_groups"][i]] + + # create fp32 gradients + fp32_flat_buffer = torch.zeros_like(self.fp8_param_groups_dict[dtype]["param_groups_flat"][i], + dtype=self.grad_acc_dtype) + self.fp32_groups_gradients_flat.append(fp32_flat_buffer) + if self.has_moe_layers and is_moe_param_group(param_group): + self.expert_gradients[param_group['name']].append(fp32_flat_buffer) + else: + self.non_expert_gradients.append(fp32_flat_buffer) + + # track individual fp32 gradients for entire model + fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i], + num_elem_list=num_elem_list) + self.fp32_groups_gradients.append(fp32_gradients) + self.fp32_groups_gradient_dict[i] = fp32_gradients + + # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding) + length_without_padding = sum(num_elem_list) + self.fp32_groups_actual_gradients_flat.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding)) + + # flat tensor corresponding to gradient partition + self.fp32_groups_gradient_flat_partition.append( + torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size)) + + # track fp32 gradient updates + self.fp32_groups_has_gradients.append([False] * + len(self.fp8_param_groups_dict[dtype]["param_groups"][i])) + + # Record padding required for alignment + if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: + padding = self.fp8_param_groups_dict[dtype]["param_groups_flat"][i].numel( + ) - length_without_padding + else: + padding = 0 + + self.group_paddings.append(padding) + first_iter = False + + def _enable_universal_checkpoint(self): + for i, lp_param_group in enumerate(self.bf16_groups): + if lp_param_group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + lp_param_group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + enable_universal_checkpoint(param_list=lp_param_group) + + def _create_param_mapping(self): + param_mapping = [] + for i, param_group in enumerate(self.optimizer.param_groups): + param_mapping_per_group = OrderedDict() + if param_group.get('group_dtypes'): + dtypes = param_group['group_dtypes'] + params = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + params = self.bf16_groups[i] + for lp in params: + if lp._hp_mapping is not None: + lp_name = self.param_names[lp] + param_mapping_per_group[lp_name] 
= lp._hp_mapping.get_hp_fragment_address() + param_mapping.append(param_mapping_per_group) + + return param_mapping + + def _link_all_hp_params(self): + for i, param_group in enumerate(self.optimizer.param_groups): + real_dp_world_size = dist.get_world_size(group=self.real_dp_process_group[i]) + + # Link bf16 and fp32 params in partition + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + if param_group.get('group_dtypes'): + dtypes = param_group.get('group_dtypes') + partition_size = self.fp8_param_groups_dict[ + dtypes[0]]["param_groups_flat"][i].numel() // real_dp_world_size + lp_param_list = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + partition_size = self.bf16_groups_flat[i].numel() // real_dp_world_size + lp_param_list = self.bf16_groups[i] + flat_hp_partition = self.fp32_groups_flat_partition[i] + link_hp_params(lp_param_list=lp_param_list, + flat_hp_partition=flat_hp_partition, + gradient_dict=self.fp32_groups_gradient_dict, + offload_gradient_dict=None, + use_offload=False, + param_group_index=i, + partition_start=partition_id * partition_size, + partition_size=partition_size, + dp_group=self.real_dp_process_group[i]) + + def _lazy_init_hp_params_optimizer_state(self): + if not self._hp_optimizer_states_linked: + for i, param_group in enumerate(self.optimizer.param_groups): + if param_group.get('group_dtypes'): + dtypes = param_group.get('group_dtypes') + params = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + params = self.bf16_groups[i] + lazy_init_hp_params_optimizer_state(params, self.fp32_groups_flat_partition[i], self.optimizer.state) + self._hp_optimizer_states_linked = True + + def initialize_optimizer_states(self): + """Take an optimizer step with zero-valued gradients to allocate internal + optimizer state. + + This helps prevent memory fragmentation by allocating optimizer state at the + beginning of training instead of after activations have been allocated. + """ + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + # In case of grad acc dtype different than FP32, need to cast to high precision. 
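+            # (Concretely: the flat gradient partition is attached as .grad of the master
+            # partition, converting to the master-weight dtype when grad_acc_dtype differs
+            # from it; when the dtypes already match, the buffer is attached as-is.)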
+ param_partition.grad = grad_partition.to( + param_partition.dtype) if grad_partition.dtype != param_partition.dtype else grad_partition + + if self.grad_acc_dtype is not torch.float32: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + + self.clear_hp_grads() + + def _split_flat_tensor(self, flat_tensor, num_elem_list): + assert sum(num_elem_list) <= flat_tensor.numel() + tensor_list = [] + offset = 0 + for num_elem in num_elem_list: + dense_tensor = torch.narrow(flat_tensor, 0, offset, num_elem) + tensor_list.append(dense_tensor) + offset += num_elem + + return tensor_list + + def _update_storage_to_flattened_tensor(self, tensor_list, flat_tensor): + updated_params = self.unflatten(flat_tensor, tensor_list) + # TODO: SW-179781 need to remove the below WA once SW-179780 is resolved + get_accelerator().synchronize() + for p, q in zip(tensor_list, updated_params): + p.data = q.data + if hasattr(p, 'all_data'): + p.all_data[p.dtype] = q.data + + def _flatten_dense_tensors_aligned(self, tensor_list, alignment): + return self.flatten(align_dense_tensors(tensor_list, alignment)) + + @torch.no_grad() + def step(self, closure=None): + if closure is not None: + raise NotImplementedError(f'{self.__class__} does not support closure.') + + non_expert_grads_for_norm, expert_grads_for_norm = self.get_grads_for_norm() + non_expert_groups_norm = get_global_norm_of_tensors(input_tensors=non_expert_grads_for_norm, + mpu=self.mpu, + norm_type=self.norm_type, + use_graph=self.graph_harvesting) + all_groups_norm = non_expert_groups_norm + if self.has_moe_layers: + all_groups_norm = get_norm_with_moe_layers(non_expert_groups_norm, + mpu=self.mpu, + expert_tensors=expert_grads_for_norm, + norm_type=self.norm_type) + + self._global_grad_norm = all_groups_norm + + assert all_groups_norm > 0. + if self.clip_grad > 0.: + clip_tensors_by_global_norm(input_tensors=self.get_grads_for_norm(for_clipping=True), + max_norm=self.clip_grad, + global_norm=all_groups_norm, + mpu=self.mpu, + use_graph=self.graph_harvesting) + + for param_partition, grad_partition in zip(self.fp32_groups_flat_partition, + self.fp32_groups_gradient_flat_partition): + # In case of grad acc dtype different than FP32, need to cast to high precision. + param_partition.grad = grad_partition.to( + param_partition.dtype) if grad_partition.dtype != param_partition.dtype else grad_partition + + self.optimizer.step() + + if self.grad_acc_dtype is not torch.float32: + for param_partition in self.fp32_groups_flat_partition: + param_partition.grad = None + + # We need to link optimizer state after the first step() call + self._lazy_init_hp_params_optimizer_state() + + self.update_lp_params() + + self.clear_hp_grads() + + def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs): + """Perform a backward pass and copy the low-precision gradients to the + high-precision copy. + + If self.immediate_grad_update is false and update_hp_grads is true we copy/accumulate to the high-precision grads now + to prevent accumulating in the bf16 grads after successive backward() calls (i.e., grad accumulation steps > 1) + + The low-precision grads are deallocated during this procedure. 
+ """ + self.clear_lp_grads() + loss.backward(**bwd_kwargs) + + if not self.immediate_grad_update and update_hp_grads: + self.update_hp_grads(clear_lp_grads=clear_lp_grads) + + @torch.no_grad() + def _update_hp_grad(self, lp, group_idx, param_idx, clear_lp_grads): + if lp.grad is None: + return + + hp_grad = self.fp32_groups_gradients[group_idx][param_idx] + assert hp_grad is not None, \ + f'high precision param has no gradient, lp param_id = {id(lp)} group_info = [{group_idx}][{param_idx}]' + + if hasattr(lp, 'hp_grad'): + grad = lp.hp_grad + else: + grad = lp.grad + hp_grad.data.add_(grad.data.to(hp_grad.dtype).view(hp_grad.shape)) + lp._hp_grad = hp_grad + self.fp32_groups_has_gradients[group_idx][param_idx] = True + + # clear gradients + if hasattr(lp, 'hp_grad'): + lp.hp_grad = None + lp.grad = None + elif clear_lp_grads: + lp.grad = None + + @torch.no_grad() + def _update_hp_grads_func(self, clear_lp_grads=False): + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + self._update_hp_grad(lp, i, j, clear_lp_grads) + + @torch.no_grad() + def update_hp_grads(self, clear_lp_grads=False): + if self.immediate_grad_update: + return + + if self.graph_harvesting: + graph_process(False, self._update_hp_grads_func, clear_lp_grads) + else: + self._update_hp_grads_func(clear_lp_grads) + #cpu op + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + if lp.grad is None: + continue + self.fp32_groups_has_gradients[i][j] = True + + @torch.no_grad() + def get_grads_for_reduction(self): + if self.has_moe_layers: + return self.non_expert_gradients, self.expert_gradients + return self.non_expert_gradients, {} + + @torch.no_grad() + def get_grads_for_norm(self, for_clipping=False): + """ + Returns: + tuple[list[Tensor], dict[ep_name, List[Tensor]] | list: + If for_clipping, return all gradients. + Otherwise, separate and return dict of expert_grad and list of non_expert_grad + """ + # (grads, expert_group_name) + expert_grads_for_norm = {} + + # grads + non_expert_grads_for_norm = [] + all_grads_for_clip = [] + + tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + assert len(self.bf16_groups) == len(self.optimizer.param_groups) + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, lp in enumerate(group): + if not for_clipping: + if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated: + continue + + # skip duplicated parameters. perform norm only on cards with tp_rank=0. + # non-duplicated parameters include: + # - Parameters with tp: Use allreducesum of mp_group. + # - Moe Parameters with ep: Use allreducesum of ep_group. 
+ if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp) or is_moe_param(lp)): + continue + + if not self.fp32_groups_has_gradients[i][j]: + continue + if not for_clipping: + param_group = self.optimizer.param_groups[i] + if self.has_moe_layers and is_moe_param_group(param_group): + if param_group['name'] not in expert_grads_for_norm: + expert_grads_for_norm[param_group['name']] = [] + expert_grads_for_norm[param_group['name']].append(self.fp32_groups_gradients[i][j]) + else: + non_expert_grads_for_norm.append(self.fp32_groups_gradients[i][j]) + else: + all_grads_for_clip.append(self.fp32_groups_gradients[i][j]) + if not for_clipping: + return non_expert_grads_for_norm, expert_grads_for_norm + return all_grads_for_clip + + @torch.no_grad() + def update_lp_params(self): + dtypes = [] + for i, (partition_group, + fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + if partition_group is None: + calculate_statistics_fn = self.optimizer.param_groups[i]['calculate_statistics_fn'] + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + params = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + calculate_statistics_fn(params) + for p in params: + if p._hp_mapping is not None: + for dtype in dtypes: + fp8_partition_data_temp = p.cast_from(p._hp_mapping.hp_fragment.data, dtype) + p.all_data[dtype].flatten().narrow( + 0, p._hp_mapping.lp_fragment_address.start, + p._hp_mapping.lp_fragment_address.numel).copy_(fp8_partition_data_temp) + else: + partition_group[partition_id].data.copy_(fp32_partition.data) + # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True) + # if i == 0: + # print_rank_0(f'{fp32_partition[:10]=}', force=True) + + #TODO: SW-90304 call all_gather_dp_groups with async_op=true if zero optimizer hpu_use_async_collectives is enabled + if any(g is not None for g in self.bf16_groups_flat): + all_gather_dp_groups(groups_flat=self.bf16_groups_flat, + partitioned_param_groups=self.bf16_partitioned_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + for dtype in dtypes: + param_groups_flat = self.fp8_param_groups_dict[dtype]["param_groups_flat"] + param_partitioned_groups = self.fp8_param_groups_dict[dtype]["param_partitioned_groups"] + if any(g is not None for g in param_groups_flat): + all_gather_dp_groups(groups_flat=param_groups_flat, + partitioned_param_groups=param_partitioned_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + + def clear_hp_grads(self): + for flat_gradients in self.fp32_groups_gradients_flat: + flat_gradients.zero_() + + for i, group in enumerate(self.fp32_groups_gradients): + self.fp32_groups_has_gradients[i] = [False] * len(group) + + def clear_lp_grads(self): + + # using zero_() fixed memory address for graph replay + set_to_none = False if self.graph_harvesting else True + zero_grads_list = [] + for i, group in enumerate(self.bf16_groups): + if group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for param in group: + if set_to_none: + param.grad = None + if hasattr(param, 'hp_grad'): + param.hp_grad = None + elif param.grad is not None: + if param.grad.grad_fn is not None: + param.grad.detach_() + 
zero_grads_list.append(param.grad) + if not set_to_none and len(zero_grads_list) > 0: + torch._foreach_zero_(zero_grads_list) + + def state_dict(self): + state_dict = {} + state_dict[CLIP_GRAD] = self.clip_grad + state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict() + state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition + state_dict[GROUP_PADDINGS] = self.group_paddings + state_dict[PARTITION_COUNT] = self.partition_count + state_dict[DS_VERSION] = version + state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings + + return state_dict + + # Restore base optimizer fp32 weights from bfloat16 weights + def _restore_from_bit16_weights(self): + for i, group in enumerate(self.bf16_groups): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + for j, (partitioned_group, + fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)): + if partitioned_group is None: + dtypes = self.optimizer.param_groups[j]['group_dtypes'] + partitioned_group = self.fp8_param_groups_dict[dtypes[0]]["param_partitioned_groups"][j] + fp32_partition.data.copy_(partitioned_group[partition_id].data) + + def refresh_fp32_params(self): + self._restore_from_bit16_weights() + + def load_state_dict(self, + state_dict_list, + checkpoint_folder, + load_optimizer_states=True, + load_from_fp32_weights=False, + load_serial=None): + if checkpoint_folder: + self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights) + else: + self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights) + + def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False): + + dp_rank = dist.get_rank(group=self.dp_process_group) + current_rank_sd = state_dict_list[dp_rank] + + ckpt_version = current_rank_sd.get(DS_VERSION, False) + assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed" + ckpt_version = pkg_version.parse(ckpt_version) + + self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad) + + if load_optimizer_states: + self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) + + if load_from_fp32_weights: + for current, saved in zip(self.fp32_groups_flat_partition, + current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]): + src_tensor = _get_padded_tensor(saved, current.numel()) + current.data.copy_(src_tensor.data) + + if load_optimizer_states: + self._link_all_hp_params() + + def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): + self._load_hp_checkpoint_state(checkpoint_folder) + + @property + def param_groups(self): + """Forward the wrapped optimizer's parameters.""" + return self.optimizer.param_groups + + def _load_hp_checkpoint_state(self, checkpoint_dir): + checkpoint_dir = os.path.join(checkpoint_dir, "zero") + tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + tp_world_size = self.mpu.get_slice_parallel_world_size() + + for i, param_group in enumerate(self.optimizer.param_groups): + # We have an assumption that all params in the same param_group have the same keys + opt_keys = set() + if param_group.get('group_dtypes'): + dtypes = param_group['group_dtypes'] + group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + else: + group = self.bf16_groups[i] + + for lp in group: + if lp._hp_mapping is not None: + #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") + 
lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, + tp_world_size) + for key in lp._hp_mapping.get_optim_state_keys(): + opt_keys.add(key) + map_to_flat_opt_states(param_group['params'][0], group, self.optimizer.state, opt_keys) + + def accumulate_hp_grads_and_remove_lp(self, lp_param, group_idx, param_idx): + assert self.immediate_grad_update + self._update_hp_grad(lp_param, group_idx, param_idx, clear_lp_grads=False) + + def create_grad_acc_hooks(self): + self.grad_accs = [] + for i, param_group in enumerate(self.bf16_groups): + if param_group is None: + dtypes = self.optimizer.param_groups[i]['group_dtypes'] + param_group = self.fp8_param_groups_dict[dtypes[0]]["param_groups"][i] + for j, param in enumerate(param_group): + if param.requires_grad: + + def wrapper(param, i, j): + param_tmp = param.expand_as(param) + grad_acc = param_tmp.grad_fn.next_functions[0][0] + + def accumulate_hp_grads_and_remove_lp(*notneeded): + self.accumulate_hp_grads_and_remove_lp(param, i, j) + + grad_acc.register_hook(accumulate_hp_grads_and_remove_lp) + self.grad_accs.append(grad_acc) + + wrapper(param, i, j) + + +def _get_padded_tensor(src_tensor, size): + if src_tensor.numel() >= size: + return src_tensor + padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device) + slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel()) + slice_tensor.data.copy_(src_tensor.data) + return padded_tensor diff --git a/deepspeed/runtime/hpu_utils.py b/deepspeed/runtime/hpu_utils.py new file mode 100644 index 000000000000..5a7ca4d4b97f --- /dev/null +++ b/deepspeed/runtime/hpu_utils.py @@ -0,0 +1,11 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.accelerator import get_accelerator + + +def get_use_hpu(): + return get_accelerator().device_name() == "hpu" diff --git a/deepspeed/runtime/hybrid_engine.py b/deepspeed/runtime/hybrid_engine.py index a991c4304563..8a6311bb6e83 100644 --- a/deepspeed/runtime/hybrid_engine.py +++ b/deepspeed/runtime/hybrid_engine.py @@ -17,16 +17,14 @@ from deepspeed.accelerator import get_accelerator from torch import nn from deepspeed.utils import logger - -from deepspeed.ops.op_builder import InferenceBuilder - from deepspeed.module_inject.layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding +from ..ops.transformer.inference.op_binding.workspace import WorkspaceOp + try: import transformers OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding except: OPTLearnedPositionalEmbedding = None -inference_cuda_module = None class DeepSpeedHybridEngine(DeepSpeedEngine): @@ -61,12 +59,8 @@ def __init__(self, args, model, **kwargs): self._total_batch_size = None self._gather_latency = 0 - global inference_cuda_module - if inference_cuda_module is None: - builder = InferenceBuilder() - inference_cuda_module = builder.load() - self.is_lora_fused = False + self.workspace = WorkspaceOp() def convert_to_linear_transposed(self, model): @@ -160,13 +154,13 @@ def unfuse_lora_weight_non_pinned(self): def retake_inference_cache(self): if self._config.hybrid_engine.release_inference_cache: - retake_success = inference_cuda_module.retake_workspace() + retake_success = self.workspace.retake_workspace() if not retake_success: logger.warning("Unable to acquire workspace on first attempt, emptying cache and retrying.") gc.collect() get_accelerator().empty_cache() - 
retake_success = inference_cuda_module.retake_workspace() + retake_success = self.workspace.retake_workspace() if not retake_success: raise RuntimeError("Unable to retake inference workspace.") @@ -269,7 +263,7 @@ def generate(self, *inputs, **kwargs): self.is_lora_fused = False if self._config.hybrid_engine.release_inference_cache: - inference_cuda_module.release_workspace() + self.workspace.release_workspace() gc.collect() get_accelerator().empty_cache() diff --git a/deepspeed/runtime/pipe/engine.py b/deepspeed/runtime/pipe/engine.py index ef1c98a95c7b..f2a67dd30991 100644 --- a/deepspeed/runtime/pipe/engine.py +++ b/deepspeed/runtime/pipe/engine.py @@ -4,6 +4,7 @@ # DeepSpeed Team from types import MethodType +from collections import OrderedDict import torch from deepspeed import comm as dist @@ -12,6 +13,7 @@ from deepspeed.utils.timer import ThroughputTimer from deepspeed.accelerator import get_accelerator from deepspeed.runtime.bf16_optimizer import BF16_Optimizer +from deepspeed.runtime.fp8_optimizer import FP8_Optimizer from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE from deepspeed.utils.timer import FORWARD_MICRO_TIMER, FORWARD_GLOBAL_TIMER, BACKWARD_MICRO_TIMER, \ @@ -66,9 +68,7 @@ class PipelineEngine(DeepSpeedEngine): def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): super().__init__(*super_args, **super_kwargs) - assert isinstance(self.module, PipelineModule) \ - or (hasattr(self.module, 'wrapped') and isinstance(self.module.wrapped, PipelineModule)), \ - "model must base PipelineModule" + assert isinstance(self.module, PipelineModule), "model must base PipelineModule" assert self.zero_optimization_stage( ) < ZeroStageEnum.gradients, "ZeRO-2 and ZeRO-3 are incompatible with pipeline parallelism" @@ -79,7 +79,7 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): self.eval_return_logits = False self.outputs = None # BF16 Optimizer is hardcoded for fp32 gradient accumulation - self.using_bf16_optimizer = type(self.optimizer) == BF16_Optimizer + self.using_bf16_optimizer = type(self.optimizer) in [BF16_Optimizer, FP8_Optimizer] # used to disable the pipeline all-reduce when used with 1-bit Adam/1-bit LAMB self.pipeline_enable_backward_allreduce = True @@ -118,7 +118,8 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): self._force_grad_boundary = False - self.batch_timer = ThroughputTimer(batch_size=self.train_batch_size(), + self.batch_timer = ThroughputTimer(self._config.timers_config, + batch_size=self.train_batch_size(), logging_fn=self.tput_log, monitor_memory=False, steps_per_output=self.steps_per_print()) @@ -151,11 +152,33 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): if self.global_rank != min(d['ranks']): tied_params += sum(p.numel() for p in d['module'].parameters()) unique_params -= tied_params - params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device) + + # Use Int32 representation instead of Int64 for calculations. + # num_params division & modulo results after all-reduce should be lower than MAX Int32. + # Using this value will be safe if used with less than ~2000 devices.
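+            # Illustrative example with a hypothetical count: using chunk_size = 10**6, num_params = 7_123_456_789
+            # is sent as quotient 7_123 and remainder 456_789; both fit comfortably in Int32, and
+            # quotient * chunk_size + remainder reconstructs the exact Int64 value after the all-reduce.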
+ # Int32Max > all_reduce_group*chunk_size + chunk_size = 10**6 + + num_params_quotient = num_params // chunk_size + num_params_remainder = num_params % chunk_size + + unique_params_quotient = unique_params // chunk_size + unique_params_remainder = unique_params % chunk_size + + assert (unique_params_quotient * chunk_size + + unique_params_remainder) == unique_params, "Value mismatch after Int64 splitting" + assert (num_params_quotient * chunk_size + + num_params_remainder) == num_params, "Value mismatch after Int64 splitting" + + params_tensor = torch.IntTensor( + data=[num_params_quotient, num_params_remainder, unique_params_quotient, unique_params_remainder]).to( + self.device) + dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group()) params_tensor = params_tensor.tolist() - total_params = params_tensor[0] - unique_params = params_tensor[1] + total_params = params_tensor[0] * chunk_size + params_tensor[1] + unique_params = params_tensor[2] * chunk_size + params_tensor[3] + if self.grid.data_parallel_id == 0: logger.info(f'RANK={self.global_rank} ' f'STAGE={self.stage_id} ' @@ -194,9 +217,15 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): #stores the loss for the entire batch self.total_loss = None + self.total_additional_losses = None self.agg_loss = torch.tensor(0.0, requires_grad=False).to(self.device) self.dp_group_loss = torch.tensor(0.0, requires_grad=False).to(self.device) + # stores aggregated-DP train final loss and aggregated-DP additional losses, if any + # additional losses are stored as dict: {loss-name: agg-loss} + self.agg_train_loss = None + self.agg_additional_losses = None + if self._config.pipeline['activation_checkpoint_interval'] > 0: self.module.activation_checkpoint_interval = self._config.pipeline['activation_checkpoint_interval'] # set use_reentrant default to True. @@ -207,6 +236,8 @@ def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): self.module.activation_checkpoint_func = ds_checkpointing.non_reentrant_checkpoint if self.grid.get_global_rank() == 0: logger.info(f'CONFIG: activation_checkpoint_func=non_reentrant_checkpoint') + if self.module.activation_checkpoint_interval > 0: + self.module._precompute_checkpointable_values() self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline @@ -284,10 +315,7 @@ def _exec_reduce_grads(self): self._force_grad_boundary = False def _bf16_reduce_grads(self): - # Make our own list of gradients from the optimizer's FP32 grads - grads = [] - self.buffered_allreduce_fallback(grads=self.optimizer.get_grads_for_reduction(), - elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE) + self.buffered_allreduce_fallback(grads=None, elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE) def _reserve_pipe_buffers(self, num_buffers): """Ensure that each pipeline buffer has at least ``num_buffers`` slots. 
@@ -363,6 +391,7 @@ def train_batch(self, data_iter=None): self.module.train() self.total_loss = None + self.total_additional_losses = None self._compute_loss = True # Do the work @@ -371,7 +400,9 @@ def train_batch(self, data_iter=None): stages=self.num_stages, stage_id=self.stage_id) self._exec_schedule(sched) - self.agg_train_loss = self._aggregate_total_loss() + + with torch.no_grad(): + self.agg_train_loss = self._aggregate_total_loss() self.timers(TRAIN_BATCH_TIMER).stop() @@ -380,10 +411,12 @@ def train_batch(self, data_iter=None): elapsed = self.timers(TRAIN_BATCH_TIMER).elapsed(reset=True) / 1000.0 iter_time = elapsed / self.steps_per_print() tput = self.train_batch_size() / iter_time - print(f'steps: {self.global_steps} ' - f'loss: {self.agg_train_loss:0.4f} ' - f'iter time (s): {iter_time:0.3f} ' - f'samples/sec: {tput:0.3f}') + log_str = f'steps: {self.global_steps} loss: {self.agg_train_loss:0.4f} ' + if self.agg_additional_losses is not None: + for loss_name, loss_value in self.agg_additional_losses.items(): + log_str += f'{loss_name}: {loss_value.item():0.4f} ' + log_str += f'iter time (s): {iter_time:0.3f} samples/sec: {tput:0.3f}' + print(log_str) else: self.timers(TRAIN_BATCH_TIMER).elapsed(reset=True) @@ -463,9 +496,7 @@ def eval_batch(self, micro_batches = self.micro_batches if num_micro_batches is None else num_micro_batches # Do the work - sched = schedule.InferenceSchedule(micro_batches=self.micro_batches, - stages=self.num_stages, - stage_id=self.stage_id) + sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=self.num_stages, stage_id=self.stage_id) # prevent dead-lock with multiple evals sequence dist.barrier() @@ -565,29 +596,66 @@ def _bcast_pipe_scalar(self, data, src_rank=None, dtype=torch.float32): def _aggregate_total_loss(self): # Scale loss, average among DP ranks, and bcast loss to the rest of my DP group if self.is_last_stage(): + # Scale loss and additional losses, if any loss = self._scale_loss_by_gas(self.total_loss) - self.dp_group_loss = loss.clone().detach() + self.agg_additional_losses = self.total_additional_losses + if self.agg_additional_losses is not None: + self.agg_additional_losses = OrderedDict({ + loss_name: self._scale_loss_by_gas(_loss.clone().detach()) + for loss_name, _loss in self.agg_additional_losses.items() + }) - ## Average loss across all data-parallel groups + self.dp_group_loss = loss.clone().detach() agg_loss = self.dp_group_loss.clone().detach() #print(f'RANK={self.global_rank} bcast SENDER src={self.global_rank} group={self.grid.pp_group}', flush=True) + + # Average loss across all data-parallel groups if self.is_data_parallel: - dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group()) - agg_loss /= self.dp_world_size + if self.agg_additional_losses is None: + dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group()) + agg_loss /= self.dp_world_size + else: + # use a single reduce op for agg_loss and additional losses, if any + assert '__train_loss__' not in self.agg_additional_losses.keys() + tensors = OrderedDict({'__train_loss__': agg_loss}) + tensors.update(self.agg_additional_losses.items()) + flat_tensor = torch.cat([t.clone().reshape(-1).detach() for t in tensors.values()]) + dist.all_reduce(flat_tensor, group=self.mpu.get_data_parallel_group()) + flat_tensor /= self.dp_world_size + offset = 0 + reduced_tensor = {} + for name, t in tensors.items(): + n_elem = t.numel() + reduced_tensor[name] = flat_tensor[offset:offset + n_elem].clone().detach().reshape(t.shape) + offset += 
n_elem + agg_loss = reduced_tensor['__train_loss__'] + self.agg_additional_losses = OrderedDict( + {name: reduced_tensor[name] + for name in self.agg_additional_losses.keys()}) assert self.global_rank in self.grid.pp_group - losses = torch.stack([self.dp_group_loss, agg_loss]).float() + losses = [self.dp_group_loss, agg_loss] + if self.agg_additional_losses is not None: + losses += list(self.agg_additional_losses.values()) + losses = torch.stack(losses).float() if self.is_pipe_parallel: dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group()) else: # Get loss from last stage src_rank = self.grid.stage_to_global(self.num_stages - 1) assert src_rank in self.grid.pp_group - losses = torch.Tensor([0., 0.]).to(self.device) + # losses to reduce are: dp_group_loss, agg_loss, model additional losses + # therefore: 2 + n_additional_losses + additional_losses = self.module.get_additional_losses() + n_additional_losses = 0 if additional_losses is None else len(additional_losses) + losses = torch.Tensor([0.] * (2 + n_additional_losses)).to(self.device) dist.broadcast(tensor=losses, src=src_rank, group=self.grid.get_pipe_parallel_group()) self.dp_group_loss = losses[0].clone().detach() agg_loss = losses[1].clone().detach() - + if additional_losses is not None: + self.agg_additional_losses = OrderedDict( + {name: losses[2 + i].clone().detach() + for i, name in enumerate(additional_losses.keys())}) return agg_loss def set_dataloader(self, loader): @@ -697,7 +765,7 @@ def _exec_forward_pass(self, buffer_id): raise ValueError("expecting a tensor or a tuple of tensors") part = PartitionedTensor(tensor=first_output, group=self.grid.get_slice_parallel_group()) # Clear the large output data, but save the computation graph - first_output.data = torch.zeros(1) + first_output.data = torch.zeros(1, device=first_output.data.device) self.pipe_buffers['output_tensors'][buffer_id] = first_output # Inject the partitioned tensor into the output before sending outputs = (part.to_meta(), part.data(), *outputs_tail) @@ -715,19 +783,34 @@ def _exec_forward_pass(self, buffer_id): self.loss = outputs if self.eval_return_logits: self.outputs = outputs + if isinstance(self.loss, torch.Tensor): self.fwd_outputs.append(self.loss.detach()) - - if self.total_loss is None: - self.total_loss = torch.zeros_like(self.loss) - self.total_loss += self.loss.detach() else: self.fwd_outputs.append([l.detach() for l in self.loss]) - if self.total_loss is None: - self.total_loss = [torch.zeros_like(l) for l in self.loss] - for idx, l in enumerate(self.loss): - self.total_loss[idx] += l.detach() + def add_to_total_loss(_total_loss, _loss): + if isinstance(_loss, torch.Tensor): + if _total_loss is None: + _total_loss = torch.zeros_like(_loss) + _total_loss += _loss.detach() + else: + if _total_loss is None: + _total_loss = [torch.zeros_like(_l) for _l in _loss] + for _idx, _l in enumerate(_loss): + _total_loss[_idx] += _l.detach() + return _total_loss + + self.total_loss = add_to_total_loss(self.total_loss, self.loss) + + # aggregate additional losses across gradient accumulation steps + additional_losses = self.module.get_additional_losses() + if additional_losses is not None: + if self.total_additional_losses is None: + self.total_additional_losses = OrderedDict() + for name, loss in additional_losses.items(): + total = self.total_additional_losses[name] if name in self.total_additional_losses else None + self.total_additional_losses[name] = add_to_total_loss(total, loss) def _exec_backward_pass(self, 
buffer_id): assert self.optimizer is not None, "must provide optimizer during " \ @@ -792,7 +875,8 @@ def _exec_backward_pass(self, buffer_id): if self.using_bf16_optimizer and not self.is_last_stage(): # manually call because we don't call optimizer.backward() - self.optimizer.update_hp_grads(clear_lp_grads=False) + if not self._config.bfloat16_immediate_grad_update: + self.optimizer.update_hp_grads(clear_lp_grads=False) # Free up the memory from the output of forward() self.pipe_buffers['output_tensors'][buffer_id] = None @@ -1332,7 +1416,7 @@ def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None, f strict (bool, optional): Strict state loading. Defaults to True. """ assert custom_load_fn is None, "custom_load_fn not supported w. pipeline parallelism" - state_dict = checkpoint['module'] + state_dict = checkpoint if self.has_moe_layers else checkpoint['module'] if (state_dict is not None) and (not isinstance(state_dict, str)): super().load_module_state_dict(state_dict, strict) return @@ -1371,3 +1455,6 @@ def _exec_schedule(self, pipe_schedule): # Equivalent to: self._exec_forward_pass(buffer_id=0) self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self) self._exec_instr(**cmd.kwargs) + + def get_additional_losses(self): + return self.agg_additional_losses diff --git a/deepspeed/runtime/pipe/module.py b/deepspeed/runtime/pipe/module.py index c11379b0a0d7..3c25cbee66ec 100644 --- a/deepspeed/runtime/pipe/module.py +++ b/deepspeed/runtime/pipe/module.py @@ -196,6 +196,16 @@ def __init__(self, #newseed = get_accelerator().initial_seed() + self._grid.get_stage_id() #ds_utils.set_random_seed(newseed) + self.activation_checkpoint_interval = activation_checkpoint_interval + + self.activation_checkpoint_func = activation_checkpoint_func + + # storage for precomputed checkpointable results + self.is_checkpointable_results = [] + self.is_checkpointable_results_interval = None + + # if configuration use_reentrant = False, self.activation_checkpoint_func will be set to ``checkpointing.non_reentrant_checkpoint`` + #with torch.random.fork_rng(devices=[get_accelerator().current_device_name()]): self._build() self.to(get_accelerator().device_name(self.local_rank)) @@ -203,10 +213,15 @@ def __init__(self, self.tied_comms = self._index_tied_modules() self._synchronize_tied_weights() - self.activation_checkpoint_interval = activation_checkpoint_interval - - self.activation_checkpoint_func = activation_checkpoint_func - # if configuration use_reentrant = False, self.activation_checkpoint_func will be set to ``checkpointing.non_reentrant_checkpoint`` + def _precompute_checkpointable_values(self): + if self.activation_checkpoint_interval > 0 and self.is_checkpointable_results_interval != self.activation_checkpoint_interval: + num_layers = len(self.forward_funcs) + self.interval_was_zero = False + for start_idx in range(0, num_layers, self.activation_checkpoint_interval): + end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers) + funcs = self.forward_funcs[start_idx:end_idx] + self.is_checkpointable_results.append(self._is_checkpointable(funcs)) + self.is_checkpointable_results_interval = self.activation_checkpoint_interval def _build(self): specs = self._layer_specs @@ -352,7 +367,9 @@ def exec_func(*inputs): else: num_layers = len(self.forward_funcs) x = forward_input - for start_idx in range(0, num_layers, self.activation_checkpoint_interval): + for start_idx, is_checkpointable_result in \ + zip(range(0, num_layers, self.activation_checkpoint_interval),
self.is_checkpointable_results): + end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers) funcs = self.forward_funcs[start_idx:end_idx] @@ -361,7 +378,7 @@ def exec_func(*inputs): if not isinstance(x, tuple): x = (x, ) - if self._is_checkpointable(funcs): + if is_checkpointable_result: x = self.activation_checkpoint_func(exec_range_func(start_idx, end_idx), *x) else: x = exec_range_func(start_idx, end_idx)(*x) @@ -634,3 +651,10 @@ def _is_checkpointable(self, funcs): return all(f.__class__.__name__ in self.checkpointable_layers for f in funcs) params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)] return any(len(list(p)) > 0 for p in params) + + def get_additional_losses(self): + """ Returns model specific additional losses for reporting + + Return a dictionary of {"loss name": loss_value} or None if no additional losses. + """ + return None diff --git a/deepspeed/runtime/state_dict_factory.py b/deepspeed/runtime/state_dict_factory.py index c2db85d1ba58..469e2fe9f55f 100755 --- a/deepspeed/runtime/state_dict_factory.py +++ b/deepspeed/runtime/state_dict_factory.py @@ -335,10 +335,26 @@ def merge_state_dict(self, new_client_sd[key] = torch.cat(value_list, axis=0) else: new_client_sd[key] = self.merge_query_key_value(value_list, ckpt_ver) - elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key: - if quantize and "mlp.dense_h_to_4h.weight" in key: - value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + elif "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "lm_head.weight" in key: new_client_sd[key] = torch.cat(value_list, axis=0) + elif "mlp.dense_h_to_4h.weight" in key: + if quantize: + value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + # HACK: + # Following code checks if h_to_4h is swiglu. This is required in order to merge correctly. + # The correct way is to add metadata to state_dict that provides info on how to merge/split each tensor. 
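+                    # Size heuristic used below: for a standard MLP, dense_h_to_4h and dense_4h_to_h have the same
+                    # number of elements; with swiglu, dense_h_to_4h is twice as large (two projections stacked along
+                    # dim 0), so each slice is chunked in half and the matching halves are merged separately.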
+ size_h_to_4h = sd_list[0]["mlp.dense_h_to_4h.weight"].numel() + size_4h_to_h = sd_list[0]["mlp.dense_4h_to_h.weight"].numel() + if size_h_to_4h == size_4h_to_h: + new_client_sd[key] = torch.cat(value_list, axis=0) + elif size_h_to_4h == 2 * size_4h_to_h: + chunked_slices = [torch.chunk(v, 2, dim=0) for v in value_list] + merged_chunks_0 = torch.cat([s[0] for s in chunked_slices], dim=0) + merged_chunks_1 = torch.cat([s[1] for s in chunked_slices], dim=0) + new_client_sd[key] = torch.cat([merged_chunks_0, merged_chunks_1], dim=0) + else: + assert False, f"Unsupported slices size of mlp.dense_h_to_4h.weight={size_h_to_4h} " \ + f"mlp.dense_4h_to_h.weight={size_4h_to_h}" else: new_client_sd[key] = value_list[0] if quantize: @@ -383,12 +399,27 @@ def split_state_dict(self, q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] new_client_sd[key] = self.split_query_key_value(value, num_to_split, ckpt_offset, ckpt_ver) - elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key: + elif "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key \ + or "lm_head.weight" in key: assert value.shape[0] % num_to_split == 0 split_size = value.shape[0] // num_to_split - if quantize and "mlp.dense_h_to_4h.weight" in key: + new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] + elif "mlp.dense_h_to_4h.weight" in key: + assert value.shape[0] % num_to_split == 0 + split_size = value.shape[0] // num_to_split + if quantize: q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] + # HACK: + # Following code checks if h_to_4h is swiglu. + # The correct way to check is to add metadata to state_dict that provides info on + # how to merge/split each tensor. + # Currently, swiglu split is NOT supported as it requires handling of all chunks. + size_h_to_4h = value.numel() + size_4h_to_h = client_sd["mlp.dense_4h_to_h.weight"].numel() + assert size_h_to_4h == size_4h_to_h, \ + f"Split not supported dense_h_to_4h.weight size={size_h_to_4h} " \ + f"and dense_4h_to_h.weight size={size_4h_to_h}" new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] else: new_client_sd[key] = value diff --git a/deepspeed/runtime/utils.py b/deepspeed/runtime/utils.py index d1ebe4b2f83d..92e22a8e128c 100755 --- a/deepspeed/runtime/utils.py +++ b/deepspeed/runtime/utils.py @@ -25,6 +25,8 @@ from torch import inf from deepspeed.utils import groups, logger +from deepspeed.utils.bwc import (bwc_tensor_model_parallel_rank, bwc_pipeline_parallel_world_size, + bwc_pipeline_parallel_group) from deepspeed.runtime.constants import PIPE_REPLICATED from numpy import prod from deepspeed.accelerator import get_accelerator @@ -117,44 +119,6 @@ def is_model_parallel_parameter(p) -> bool: return False -def bwc_tensor_model_parallel_rank(mpu=None): - """Backwards-compatible way of querying the tensor model parallel rank from - an ``mpu`` object. - - *Tensor* model parallelism means that tensors are physically split across - processes. This contrasts with *pipeline* model parallelism, in which the - layers are partitioned but tensors left intact. - - The API for tensor model parallelism has changed across versions and this - helper provides a best-effort implementation across versions of ``mpu`` - objects. The preferred mechanism is - ``mpu.get_tensor_model_parallel_rank()``. 
- - This should "just work" with both Megatron-LM and DeepSpeed's pipeline - parallelism. - - Args: - mpu (model parallel unit, optional): The tensor model parallel rank. - If ``mpu=None``, returns 0. Defaults to ``None``. - - Returns: - int: the rank - """ - if mpu is None: - # No model parallelism in easy :) - return 0 - - if hasattr(mpu, 'get_tensor_model_parallel_rank'): - # New Megatron and DeepSpeed convention (post pipeline-parallelism release) - return mpu.get_tensor_model_parallel_rank() - elif hasattr(mpu, 'get_slice_parallel_rank'): - # Some DeepSpeed + pipeline parallelism versions - return mpu.get_slice_parallel_rank() - else: - # Deprecated Megatron and DeepSpeed convention - return mpu.get_model_parallel_rank() - - def copy_to_device(item, device, criterion_func): """ Return a copy of tensor on specified device. @@ -205,6 +169,17 @@ def move_to_device(item, device, criterion_func): return item +def get_norm_with_moe_layers_fast(all_groups_norm, group): + # This implementation standardizes the grad_norm across ranks. A more precise implementation can be found in 'get_norm_with_moe_layers'. + # Need to allreduce (avg) the norms across different ranks because moe params will not be synced during allreduce + scaled_norm = all_groups_norm * 1.0 / float(dist.get_world_size(group=group)) + scaled_norm_tensor = torch.tensor(scaled_norm, device=get_accelerator().current_device(), dtype=torch.float) + dist.all_reduce(scaled_norm_tensor, group=group) + all_groups_norm = scaled_norm_tensor.item() + #print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}") + return all_groups_norm + + class CheckOverflow(object): '''Checks for overflow in gradient across parallel process''' @@ -369,8 +344,7 @@ def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): for p in parameters: all_norms.append(p.grad.data.abs().max().float()) total_norm = torch.stack(all_norms).max() - origin_device = total_norm.device.type - total_norm = total_norm.to(get_accelerator().device_name()) + total_norm = total_norm.to(get_accelerator().current_device_name()) # Take max across all GPUs. if mpu is not None: dist.all_reduce(total_norm, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) @@ -387,9 +361,8 @@ def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): if len(all_norms) > 0: total_norm = torch.stack(all_norms).square().sum().float() else: - total_norm = torch.FloatTensor([0.0]).to(parameters[0].device) - origin_device = total_norm.device.type - total_norm = total_norm.to(get_accelerator().device_name()) + total_norm = get_accelerator().FloatTensor([0.0]) + total_norm = total_norm.to(get_accelerator().current_device_name()) # Sum across all model parallel GPUs. 
if mpu is not None: dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) @@ -402,11 +375,11 @@ def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): dist.all_reduce(scaled_norm_tensor, group=pg) total_norm = scaled_norm_tensor - total_norm = total_norm.to(origin_device) + total_norm = total_norm.to(parameters[0].device) - max_norm = torch.tensor([float(max_norm)], device=parameters[0].device) + max_norm = torch.tensor([float(max_norm)], device=total_norm.device) clip_coef = max_norm / (total_norm + 1e-6) - tmp_tensor = torch.tensor([1.0], device=parameters[0].device) + tmp_tensor = torch.tensor([1.0], device=clip_coef.device) clip_coef = torch.min(tmp_tensor, clip_coef) for p in parameters: p.grad.data.mul_(clip_coef) @@ -861,7 +834,7 @@ def clip_gradients(parameters, max_norm=1.0, global_grad_norm=None, mpu=None, ep return global_grad_norm -def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None, use_graph=False): +def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None, use_graph=False, moe_ep_group=None): """Get norm of an iterable of tensors. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and @@ -879,37 +852,64 @@ def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None, use_graph=F assert all([torch.is_tensor(t) for t in input_tensors]), f'expected list of only tensors' norm_type = float(norm_type) + all_norms = [] if norm_type == inf: - total_norm = max(t.data.abs().max() for t in input_tensors) - total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + for t in input_tensors: + all_norms.append(t.data.abs().max().float()) + total_norm = torch.stack(all_norms).max() + device_total_norm = total_norm.to(get_accelerator().current_device_name()) + # Max across model parallel if mpu is not None: - dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) - total_norm = total_norm_cuda[0].item() + # For MoE grads, max over model parallel only if MoE-TP is enabled + if moe_ep_group is None or groups._get_expert_model_parallel_world_size() > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) + # If MoE grads and MoE-TP disabled, max over pipeline parallel + elif bwc_pipeline_parallel_world_size(mpu) > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.MAX, group=bwc_pipeline_parallel_group(mpu)) + + # MoE grads: max across expert parallel group + if moe_ep_group is not None: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.MAX, group=moe_ep_group) + total_norm = device_total_norm.to(input_tensors[0].device) else: - if use_graph: - if 'norm_tensors_compute_buffer' not in graph_cache: - graph_cache['norm_tensors_compute_buffer'] = [t.data.float().norm(norm_type) for t in input_tensors] - compute_buffer = graph_cache['norm_tensors_compute_buffer'] - def _norm_tensors(tensor_list, _compute_buffer, _norm_type): - for i, t in enumerate(tensor_list): - _compute_buffer[i].data.copy_(t.data.float().norm(_norm_type)**_norm_type) - if i != 0: - _compute_buffer[0].data.add_(_compute_buffer[i].data) + if 'norm_tensors_compute_buffer' not in graph_cache or len( + graph_cache['norm_tensors_compute_buffer']) != len(input_tensors): + graph_cache['norm_tensors_compute_buffer'] = [ + torch.empty([], dtype=torch.float, device=get_accelerator().current_device_name()) + for t in input_tensors + ] + compute_buffer = graph_cache['norm_tensors_compute_buffer'] - graph_process(False, _norm_tensors, input_tensors, 
compute_buffer, norm_type) + def _norm_tensors(tensor_list, _compute_buffer, _norm_type): + for i, t in enumerate(tensor_list): + _compute_buffer[i].data.copy_(t.data.float().norm(_norm_type)**_norm_type) + if i != 0: + _compute_buffer[0].data.add_(_compute_buffer[i].data) - total_norm = compute_buffer[0] + if use_graph: + graph_process(False, _norm_tensors, input_tensors, compute_buffer, norm_type) else: - total_norm = sum([t.data.float().norm(norm_type).item()**norm_type for t in input_tensors]) + _norm_tensors(input_tensors, compute_buffer, norm_type) - total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]).detach() + device_total_norm = compute_buffer[0].float().detach() + + # Sum across model parallel if mpu is not None: - dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) - total_norm = total_norm_cuda[0].item()**(1. / norm_type) + # For MoE grads, sum over model parallel only if MoE-TP is enabled + if moe_ep_group is None or groups._get_expert_model_parallel_world_size() > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) + # If MoE grads and MoE-TP disabled, sum over pipeline parallel + elif bwc_pipeline_parallel_world_size(mpu) > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.SUM, group=bwc_pipeline_parallel_group(mpu)) - if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: - total_norm = -1 + # MoE grads: sum across expert parallel group + if moe_ep_group is not None: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.SUM, group=moe_ep_group) + total_norm = device_total_norm.to(input_tensors[0].device).pow(1. / norm_type) + + inf_or_nan = total_norm.isinf().logical_or(total_norm.isnan()) + total_norm.masked_fill_(inf_or_nan, -1) return total_norm @@ -964,6 +964,8 @@ def align_dense_tensors(tensor_list, alignment): def all_gather_into_tensor_dp_groups(groups_flat, partitioned_param_groups, dp_process_group): for group_id, (group_flat, partitioned_params) in enumerate(zip(groups_flat, partitioned_param_groups)): + if group_flat is None: + continue partition_id = dist.get_rank(group=dp_process_group[group_id]) dp_world_size = dist.get_world_size(group=dp_process_group[group_id]) if dp_world_size == 1: @@ -979,6 +981,8 @@ def all_gather_dp_groups(groups_flat, partitioned_param_groups, dp_process_group return all_gather_into_tensor_dp_groups(groups_flat, partitioned_param_groups, dp_process_group) for group_id, partitioned_params in enumerate(partitioned_param_groups): + if partitioned_params is None: + continue # Sequential AllGather Best of both worlds partition_id = dist.get_rank(group=dp_process_group[group_id]) dp_world_size = dist.get_world_size(group=dp_process_group[group_id]) @@ -1048,3 +1052,45 @@ def required_torch_version(min_version=None, max_version=None): return False return True + + +def get_norm_with_moe_layers(non_expert_norm, mpu, expert_tensors, norm_type=2): + """ Compute the global norm with MoE experts + + Inputs: + non_expert_norm (float) : the calculated norm of the non-expert params + expert_tensors (Dict[ep_name, List[Tensor]): Dictionary of expert group name to list of grad tensors + norm_type (int): the norm to use + + Returns: + if norm is (-/+) inf, returns -1 + otherwise the global norm (float) + """ + + def to_tensor(v): + return get_accelerator().FloatTensor(float(v)).detach() + + group_norms = [non_expert_norm] + for exp_name, tensors in expert_tensors.items(): + group_norm = 
get_global_norm_of_tensors(input_tensors=tensors, + mpu=mpu, + norm_type=norm_type, + use_graph=False, + moe_ep_group=groups._get_expert_parallel_group(exp_name)) + group_norms.append(group_norm) + + # check if all norms are valid + group_norms = torch.stack([to_tensor(norm) for norm in group_norms]) + if group_norms.eq(-1).any(): + return -1 + + # combine norms + if norm_type == inf: + total_norm = group_norms.max().item() + else: + total_norm = group_norms.pow(norm_type).sum() + total_norm = total_norm.item()**(1. / norm_type) + if total_norm == float('inf') or total_norm == -float('inf'): + total_norm = -1 + + return total_norm diff --git a/deepspeed/runtime/zero/config.py b/deepspeed/runtime/zero/config.py index 76583c129cb9..9f45bf811bb4 100644 --- a/deepspeed/runtime/zero/config.py +++ b/deepspeed/runtime/zero/config.py @@ -20,6 +20,7 @@ "stage": [0|1|2], "stage3_max_live_parameters" : 1000000000, "stage3_max_reuse_distance" : 1000000000, + "stage3_use_all_reduce_for_fetch_params": [true|false], "allgather_partitions": [true|false], "use_multi_rank_bucket_allreduce": [true|false], "allgather_bucket_size": 500000000, @@ -239,6 +240,12 @@ class DeepSpeedZeroConfig(DeepSpeedConfigModel): new_param="gather_16bit_weights_on_model_save") """ Deprecated, please use ``gather_16bit_weights_on_model_save`` """ + use_all_reduce_for_fetch_params: bool = Field(False, alias="stage3_use_all_reduce_for_fetch_params") + """ + Use the all_reduce op when fetching module parameters at stage3. This can significantly improve + performance by reducing the overhead of concatenation and slicing on the host. + """ + ignore_unused_parameters: bool = True """ Unused parameters in modules may be unexpected in static networks, but diff --git a/deepspeed/runtime/zero/partition_parameters.py b/deepspeed/runtime/zero/partition_parameters.py index 142259c1b7df..38eb60906f3e 100755 --- a/deepspeed/runtime/zero/partition_parameters.py +++ b/deepspeed/runtime/zero/partition_parameters.py @@ -23,7 +23,7 @@ from deepspeed.utils import groups import deepspeed -from ..utils import see_memory_usage +from ..utils import get_only_unique_item, see_memory_usage from deepspeed.runtime.zero.config import DeepSpeedZeroConfig from deepspeed.runtime.zero.utils import assert_ints_same_as_other_ranks, is_zero_param from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum @@ -56,7 +56,8 @@ def __init__(self, param: Parameter) -> None: self.__param = param def wait(self) -> None: - get_accelerator().current_stream().synchronize() + if not get_accelerator().resolves_data_dependency(): + get_accelerator().current_stream().synchronize() self.__param.ds_status = ZeroParamStatus.AVAILABLE @@ -81,7 +82,8 @@ def wait(self) -> None: if self.__complete: return - get_accelerator().current_stream().synchronize() + if not get_accelerator().resolves_data_dependency(): + get_accelerator().current_stream().synchronize() for param in self.__params: assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" param.ds_status = ZeroParamStatus.AVAILABLE @@ -237,7 +239,7 @@ def wrapped_fn(*args, **kwargs) -> Tensor: kwargs['device'] = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])) tensor: Tensor = fn(*args, **kwargs) if tensor.is_floating_point(): - tensor.data = tensor.data.to(target_fp_dtype) + tensor = tensor.to(target_fp_dtype) #TODO(SW-180933) remove when SW-174023 is resolved return tensor @@ -713,6 +715,31 @@ def wait(self) -> None: handle.wait() +class
AllReduceCoalescedHandle: + + def __init__(self, handle, params: List[Parameter]) -> None: + self.handle = handle + self.params = params + self.complete = False + + for param in self.params: + if param.ds_status != ZeroParamStatus.INFLIGHT: + raise RuntimeError(f"expected param {param.ds_summary()} to not be available") + + @instrument_w_nvtx + def wait(self) -> None: + if self.complete: + return + + instrument_w_nvtx(self.handle.wait)() + + for param in self.params: + assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" + param.ds_status = ZeroParamStatus.AVAILABLE + + self.complete = True + + class QuantizationInfo: # a placeholder object to store all quant related vars used in handles def __init__(self) -> None: @@ -905,15 +932,7 @@ def __init__( _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, mpu) if config_dict_or_path is not None else None if _ds_config is not None: - if _ds_config.zero_config.memory_efficient_linear and _ds_config.compile_config.enabled: - # memory_efficient_linear displays numerous errors when torch.compile is enabled. - # Refer to https://github.com/pytorch/pytorch/issues/119059 for details. - # Further investigation into performance is necessary, even after resolving this issue because - # the `memory_efficient_linear` module may lead to more graph breaks compared to the original implementation. - logger.warning(f'memory_efficient_linear is disabled when torch.compile is enabled.') - mem_efficient_linear = False - else: - mem_efficient_linear = _ds_config.zero_config.memory_efficient_linear + mem_efficient_linear = _ds_config.zero_config.memory_efficient_linear super().__init__(enabled=enabled, mem_efficient_linear=mem_efficient_linear, ds_config=_ds_config, dtype=dtype) if not dist.is_initialized(): @@ -1000,6 +1019,11 @@ def __init__( if not self.use_all_gather_into_tensor: logger.info(f"all_gather_into_tensor API is not available in torch {torch.__version__}") + self.use_all_reduce_for_fetch_params = get_config_default(DeepSpeedZeroConfig, + "use_all_reduce_for_fetch_params") + if _ds_config is not None: + self.use_all_reduce_for_fetch_params = _ds_config.zero_config.use_all_reduce_for_fetch_params + def _update_persist_config(self, ds_config): Init.apply_param_persistence = True Init.param_persistence_threshold = ds_config.zero_config.param_persistence_threshold @@ -1247,75 +1271,100 @@ def all_gather_coalesced(params: Iterable[Parameter], return AllGatherHandle(handle, param, quantization=quant_info) else: - if not quantize: - dtype_params = defaultdict(list) - for p in params: - dtype_params[p.ds_tensor.dtype].append(p) - handles = [] - for dtype, params in dtype_params.items(): - handles.append(_all_gather_dtype(dtype, params, world_size, rank_in_group, ds_process_group)) + # TODO (SW-164792): Handle use case when use_secondary_tensor is True + if self.use_all_reduce_for_fetch_params and not quantize and not use_secondary_tensor: + # Use all_reduce instead of all_gather to fetch the module params + flat_buffer_size = sum(p.ds_numel_aligned for p in params) + flat_tensor = torch.zeros(flat_buffer_size, + dtype=get_only_unique_item(p.ds_tensor.dtype for p in params), + device=get_accelerator().current_device_name(), + requires_grad=False) + start_param = 0 + for param in params: + param.data = flat_tensor.narrow(0, start_param, param.ds_numel).view(param.ds_shape) + start = start_param + param.ds_tensor.ds_numel * self.get_partition_rank() + flat_tensor.narrow(0, start, 
param.ds_tensor.ds_numel).copy_(param.ds_tensor) - return MultipleAllGatherHandles(handles) + start_param += param.ds_numel - else: - partition_sz = sum(p.ds_tensor.ds_numel for p in params) + handle = dist.all_reduce(flat_tensor, group=ds_process_group, async_op=True) - if use_secondary_tensor: - partition_sz = sum(p.ds_tensor.ds_numel * p.ds_secondary_tensor_num_of_groups for p in params) + return AllReduceCoalescedHandle(handle=handle, params=params) + else: + if not quantize: + dtype_params = defaultdict(list) + for p in params: + dtype_params[p.ds_tensor.dtype].append(p) + handles = [] + for dtype, params in dtype_params.items(): + handles.append( + _all_gather_dtype(dtype, params, world_size, rank_in_group, ds_process_group)) - flat_tensor = torch.empty(partition_sz * world_size, - dtype=torch.int8, - device=get_accelerator().current_device_name(), - requires_grad=False) + return MultipleAllGatherHandles(handles) - if use_secondary_tensor: - if hasattr(params[0].ds_secondary_tensor, "ds_quant_scale"): - quantized_param = instrument_w_nvtx(torch.cat)([ - p.ds_secondary_tensor.data.to(get_accelerator().current_device_name()) for p in params - ]) - scales = instrument_w_nvtx(torch.cat)([ - p.ds_secondary_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) - for p in params - ]) - else: - quantized_param, scales = self.quantizer_module.quantize( - instrument_w_nvtx(torch.cat)([ - p.ds_secondary_tensor.to(get_accelerator().current_device_name()) for p in params - ])) else: - if hasattr(params[0].ds_tensor, "ds_quant_scale"): - quantized_param = instrument_w_nvtx(torch.cat)( - [p.ds_tensor.data.to(get_accelerator().current_device_name()) for p in params]) - scales = instrument_w_nvtx(torch.cat)([ - p.ds_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) for p in params - ]) + partition_sz = sum(p.ds_tensor.ds_numel for p in params) + + if use_secondary_tensor: + partition_sz = sum(p.ds_tensor.ds_numel * p.ds_secondary_tensor_num_of_groups + for p in params) + + flat_tensor = torch.empty(partition_sz * world_size, + dtype=torch.int8, + device=get_accelerator().current_device_name(), + requires_grad=False) + + if use_secondary_tensor: + if hasattr(params[0].ds_secondary_tensor, "ds_quant_scale"): + quantized_param = instrument_w_nvtx(torch.cat)([ + p.ds_secondary_tensor.data.to(get_accelerator().current_device_name()) + for p in params + ]) + scales = instrument_w_nvtx(torch.cat)([ + p.ds_secondary_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) + for p in params + ]) + else: + quantized_param, scales = self.quantizer_module.quantize( + instrument_w_nvtx(torch.cat)([ + p.ds_secondary_tensor.to(get_accelerator().current_device_name()) + for p in params + ])) else: - quantized_param, scales = self.quantizer_module.quantize( - instrument_w_nvtx(torch.cat)( - [p.ds_tensor.to(get_accelerator().current_device_name()) for p in params])) - quant_scale_buffer = torch.empty( - scales.numel() * world_size, - dtype=torch.float32, - device=get_accelerator().current_device_name(), - requires_grad=False, - ) - handle = _dist_allgather_fn(quantized_param, flat_tensor, ds_process_group) - quant_handle = _dist_allgather_fn(scales, quant_scale_buffer, ds_process_group) - quant_info = QuantizationInfo() - quant_info.quantized_param = flat_tensor - quant_info.backend = self.quantizer_module - quant_info.quant_handle = quant_handle - quant_info.scale_buffer = quant_scale_buffer - quant_info.partition_sz = partition_sz - quant_info.world_size = world_size - return 
AllGatherCoalescedHandle( - allgather_handle=handle, - params=params, - partitions=None, - world_size=world_size, - use_secondary_tensor=use_secondary_tensor, - quantization=quant_info, - ) + if hasattr(params[0].ds_tensor, "ds_quant_scale"): + quantized_param = instrument_w_nvtx(torch.cat)( + [p.ds_tensor.data.to(get_accelerator().current_device_name()) for p in params]) + scales = instrument_w_nvtx(torch.cat)([ + p.ds_tensor.ds_quant_scale.to(get_accelerator().current_device_name()) + for p in params + ]) + else: + quantized_param, scales = self.quantizer_module.quantize( + instrument_w_nvtx(torch.cat)( + [p.ds_tensor.to(get_accelerator().current_device_name()) for p in params])) + quant_scale_buffer = torch.empty( + scales.numel() * world_size, + dtype=torch.float32, + device=get_accelerator().current_device_name(), + requires_grad=False, + ) + handle = _dist_allgather_fn(quantized_param, flat_tensor, ds_process_group) + quant_handle = _dist_allgather_fn(scales, quant_scale_buffer, ds_process_group) + quant_info = QuantizationInfo() + quant_info.quantized_param = flat_tensor + quant_info.backend = self.quantizer_module + quant_info.quant_handle = quant_handle + quant_info.scale_buffer = quant_scale_buffer + quant_info.partition_sz = partition_sz + quant_info.world_size = world_size + return AllGatherCoalescedHandle( + allgather_handle=handle, + params=params, + partitions=None, + world_size=world_size, + use_secondary_tensor=use_secondary_tensor, + quantization=quant_info, + ) def partition(param_list=None, hierarchy=0, has_been_updated=False): cls = param @@ -1551,6 +1600,7 @@ def _partition_param(self, param, buffer=None, has_been_updated=False): param.ds_tensor.ds_numel = partition_size param.ds_tensor.status = PartitionedParamStatus.AVAILABLE param.ds_tensor.final_location = final_location + param.ds_numel_aligned = tensor_size start = partition_size * self.get_partition_rank() end = start + partition_size @@ -1606,6 +1656,8 @@ def _partition_param_sec(self, param, buffer=None, has_been_updated=False): ##support for NVME secondary param offload #print_rank_0(f"SEC Param id {param.ds_id} status is {param.ds_status}", force=True) if param.ds_status is ZeroParamStatus.AVAILABLE: + if param.ds_secondary_tensor is not None and not has_been_updated: ##param already partitioned + return #check padding tensor_size = self._aligned_size(param) partition_size = tensor_size // self.dp_world_size @@ -1644,7 +1696,8 @@ def _partition_param_sec(self, param, buffer=None, has_been_updated=False): sec_numel).copy_(one_dim_param.narrow(0, secondary_start, sec_numel)) # TODO: This is a temporary fix to avoid the issue that 2nd tensor all-gather happens before 2nd tensor partition is done - get_accelerator().current_stream().synchronize() + if not get_accelerator().resolves_data_dependency(): + get_accelerator().current_stream().synchronize() print_rank_0(f"{param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}", force=False) @@ -1679,7 +1732,8 @@ def _allgather_param(self, param, async_op=False, hierarchy=0): f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ', force=False) - get_accelerator().synchronize() + if not get_accelerator().resolves_data_dependency(): + get_accelerator().synchronize() print_rank_0( f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}" @@ -1812,7 +1866,8 @@ def _allgather_params_coalesced(self, param_list, hierarchy=0, 
quantize=False): param.data = gathered_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape).data # guarantee the communication to be completed - get_accelerator().synchronize() + if not get_accelerator().resolves_data_dependency(): + get_accelerator().synchronize() return None diff --git a/deepspeed/runtime/zero/stage3.py b/deepspeed/runtime/zero/stage3.py index 03813afa4ed1..50cbffa4f97a 100644 --- a/deepspeed/runtime/zero/stage3.py +++ b/deepspeed/runtime/zero/stage3.py @@ -11,11 +11,11 @@ from deepspeed.utils import groups from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors -from deepspeed.runtime import ZeROOptimizer +from deepspeed.runtime.base_optimizer import ZeROOptimizer from deepspeed.utils import logger from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced, all_to_all_quant_reduce -from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter, get_only_unique_item +from deepspeed.runtime.utils import inf, is_model_parallel_parameter, get_only_unique_item from deepspeed.runtime.zero.partition_parameters import * from deepspeed.runtime.zero.config import ZeroStageEnum from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum @@ -215,14 +215,12 @@ def __init__( self.module = module self.elastic_checkpoint = elastic_checkpoint - self.inf_or_nan_tracker: Tensor = torch.zeros(1, - dtype=torch.bool, - device=get_accelerator().current_device_name(), - requires_grad=False) + self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu + + self.inf_or_nan_tracker: Tensor = torch.zeros(1, dtype=torch.bool, device=self.device, requires_grad=False) self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam) - self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu ### streams used for overlapping computation with communication self.reduce_and_partition_stream = None if get_accelerator().is_synchronized_device() else get_accelerator( ).Stream() if overlap_comm else get_accelerator().default_stream() @@ -1412,7 +1410,7 @@ def complete_grad_norm_calculation_for_cpu_offload(self, params): err = torch.tensor(-1.0, device=self.device, dtype=torch.float) total_norm = inf_or_nan * err + inf_or_nan.logical_not() * total_norm - return total_norm + return total_norm.cpu() @instrument_w_nvtx def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: @@ -2027,7 +2025,7 @@ def step(self, closure=None): return norm_groups = self._get_norm_groups() - scaled_global_grad_norm = get_global_norm(norm_list=norm_groups) + scaled_global_grad_norm = torch.norm(torch.stack(norm_groups)) # Stash unscaled gradient norm self._global_grad_norm = scaled_global_grad_norm / self.loss_scale @@ -2111,8 +2109,8 @@ def unscale_and_clip_grads(self, sub_group_id, total_norm): if self.clip_grad > 0.: # norm is in fact norm*scale clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad - if clip > 1: - combined_scale = clip * self.loss_scale + clip = torch.clamp(clip, min=1.0) + combined_scale = clip * self.loss_scale self.fp32_partitioned_groups_flat[sub_group_id].grad.mul_(1. 
/ combined_scale) @@ -2147,7 +2145,8 @@ def has_overflow(self, partition_gradients=True): self.inf_or_nan_tracker += torch.isnan(self.grad_partitions_flat_buffer).any() self.inf_or_nan_tracker = self.inf_or_nan_tracker > 0 - overflow_gpu = self.inf_or_nan_tracker.clone().to(torch.uint8) + overflow_gpu = self.inf_or_nan_tracker.clone().to(get_accelerator().current_device_name()).to( + torch.uint8) self.inf_or_nan_tracker.zero_() if not get_accelerator().resolves_data_dependency(): @@ -2381,9 +2380,9 @@ def _set_param_groups(self, value): # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): if self.custom_loss_scaler: - return self.external_loss_scale + return float(self.external_loss_scale) else: - return self.loss_scaler.cur_scale + return float(self.loss_scaler.cur_scale) def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value diff --git a/deepspeed/runtime/zero/stage_1_and_2.py b/deepspeed/runtime/zero/stage_1_and_2.py index 71a01b2391f8..f1ddd2dc53a3 100755 --- a/deepspeed/runtime/zero/stage_1_and_2.py +++ b/deepspeed/runtime/zero/stage_1_and_2.py @@ -4,21 +4,20 @@ # DeepSpeed Team import torch -import os from deepspeed import comm as dist from packaging import version as pkg_version from collections import OrderedDict from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors -from deepspeed.runtime import ZeROOptimizer +from deepspeed.runtime.base_optimizer import ZeROOptimizer from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler -from deepspeed.runtime.utils import (bwc_tensor_model_parallel_rank, empty_cache, see_memory_usage, inf, - is_model_parallel_parameter, align_dense_tensors, all_gather_dp_groups) - +from deepspeed.runtime.utils import (empty_cache, see_memory_usage, inf, is_model_parallel_parameter, + align_dense_tensors, all_gather_dp_groups) from deepspeed.runtime.zero.config import ZeroStageEnum from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.ops.adam import DeepSpeedCPUAdam from deepspeed.utils import logger +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank from deepspeed.moe.utils import is_moe_param from deepspeed.git_version_info import version @@ -1360,7 +1359,7 @@ def reduce_ipg_grads(self): self.average_tensor(extra_large_grad_reduc.view(-1)) self.extra_large_param_to_reduce = None else: - self.average_tensor(self.ipg_buffer[self.ipg_index]) + self.average_tensor(self.ipg_buffer[self.ipg_index].narrow(0, 0, self.elements_in_ipg_bucket)) else: self.buffered_reduce_fallback(None, self.grads_in_ipg_bucket, @@ -1957,8 +1956,8 @@ def unscale_and_clip_grads(self, grad_groups_flat, total_norm): if self.clip_grad > 0.: # norm is in fact norm*scale clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad - if clip > 1: - combined_scale = clip * self.loss_scale + clip = torch.clamp(clip, min=1.0) + combined_scale = clip * self.loss_scale for grad in grad_groups_flat: if isinstance(grad, list): @@ -2082,9 +2081,9 @@ def _set_param_groups(self, value): # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): if self.custom_loss_scaler: - return self.external_loss_scale + return float(self.external_loss_scale) else: - return self.loss_scaler.cur_scale + return float(self.loss_scaler.cur_scale) def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value @@ -2287,32 +2286,13 @@ def load_state_dict(self, self._load_legacy_checkpoint(state_dict_list, 
load_optimizer_states, load_from_fp32_weights) def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): - self._load_hp_checkpoint_state(checkpoint_folder) + self.load_hp_checkpoint_state_from_checkpoint_dir("bit16_groups", checkpoint_folder) @property def param_groups(self): """Forward the wrapped optimizer's parameters.""" return self.optimizer.param_groups - def _load_hp_checkpoint_state(self, checkpoint_dir): - checkpoint_dir = os.path.join(checkpoint_dir, "zero") - optim_state_path = os.path.join(checkpoint_dir, "optimizer_state.pt") - assert os.path.isfile( - optim_state_path), f'{optim_state_path} containing optimizer global state is missing! Cannot proceed.' - optim_sd = torch.load(optim_state_path) - self._load_global_state(optim_sd) - - tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) - tp_world_size = self.mpu.get_slice_parallel_world_size() if hasattr(self.mpu, "get_slice_parallel_world_size") \ - else self.mpu.get_tensor_model_parallel_world_size() - - for i, _ in enumerate(self.optimizer.param_groups): - for lp in self.bit16_groups[i]: - if lp._hp_mapping is not None: - #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") - lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, - tp_world_size) - def _load_global_state(self, sd): self.loss_scaler = sd.get(LOSS_SCALER, self.loss_scaler) self.dynamic_loss_scale = sd.get('dynamic_loss_scale', self.dynamic_loss_scale) @@ -2446,7 +2426,9 @@ def estimate_zero2_model_states_mem_needs(total_params, gpu_mem = 2 * total_params cpu_mem = total_params * max(4 * total_gpus, 16) * additional_buffer_factor else: - gpu_mem = 4 * total_params + int(16 * total_params / total_gpus) + # GPU's total_params multipliers: 2 = params_16bit, + # 18 = 2_grads_16bit + 4_grads_32bit + 4_params_32bit + 8_optimizer_states_32bit(momentum and variance) + gpu_mem = 2 * total_params + int(18 * total_params / total_gpus) cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor return int(cpu_mem), int(gpu_mem) diff --git a/deepspeed/runtime/zero/utils.py b/deepspeed/runtime/zero/utils.py index f61715bd4387..e26004c605c3 100755 --- a/deepspeed/runtime/zero/utils.py +++ b/deepspeed/runtime/zero/utils.py @@ -65,7 +65,6 @@ def get_lst_from_rank0(lst: List[int]) -> None: lst_tensor = torch.tensor( lst if dist.get_rank() == 0 else [-1] * len(lst), dtype=int, - # device=get_accelerator().current_device_name(), device=torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])), requires_grad=False, ) diff --git a/deepspeed/tools/__init__.py b/deepspeed/tools/__init__.py new file mode 100644 index 000000000000..36d8fbe03558 --- /dev/null +++ b/deepspeed/tools/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team diff --git a/deepspeed/tools/pg_sim/__init__.py b/deepspeed/tools/pg_sim/__init__.py new file mode 100644 index 000000000000..917c5a255fa8 --- /dev/null +++ b/deepspeed/tools/pg_sim/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .pg import install_sim_dist_backend diff --git a/deepspeed/tools/pg_sim/pg.py b/deepspeed/tools/pg_sim/pg.py new file mode 100644 index 000000000000..b7e2483f86d0 --- /dev/null +++ b/deepspeed/tools/pg_sim/pg.py @@ -0,0 +1,306 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import warnings +import torch +import torch.distributed as dist + +from datetime import timedelta +from functools import partial +from torch.distributed.constants import default_pg_timeout +from torch.distributed.distributed_c10d import (GroupMember, BroadcastOptions, AllreduceOptions, ReduceOp) +from torch.distributed.distributed_c10d import STORE_BASED_BARRIER_PREFIX # noqa +from deepspeed.accelerator import get_accelerator + + +class SimProcessGroup: + BACKEND = "sim" + DEFAULT_PG = None + WORLD_SIZE = -1 + STORE = None + + def __init__(self, rank, world_size, timeout, backend): + self.sim_rank = rank + self.pg_world_size = world_size + self.timeout = timeout + self.backend = backend + self.pg = None + self.torch_ver_major = int(torch.__version__.split('.')[0]) + self.torch_ver_minor = int(torch.__version__.split('.')[1]) + + assert self.torch_ver_major == 1, \ + f"Torch version major != 1 is not supported (version={torch.__version__})" + assert self.torch_ver_minor >= 10, \ + f"Torch version < 1.10 is not supported (version={torch.__version__})" + + if self.torch_ver_minor < 13: + warnings.warn(f"Torch version < 1.13 is not tested (version={torch.__version__})") + + # default is the first process group created + if SimProcessGroup.DEFAULT_PG is None: + SimProcessGroup.DEFAULT_PG = self + + @staticmethod + def get_dist_group_count(): + return torch.distributed.distributed_c10d._group_count + + @classmethod + def store_add_rest_of_world(cls, next_group): + group = cls.get_dist_group_count() + (1 if next_group else 0) + store_key = f"{STORE_BASED_BARRIER_PREFIX}:{group}" + cls.STORE.add(store_key, cls.WORLD_SIZE - 1) + + def _create_pg(self): + self.store_add_rest_of_world(next_group=False) + pg = dist.new_group(ranks=[0], timeout=self.timeout, backend=self.backend, pg_options=None) + return pg + + def post_create_sim_group(self): + self.pg = self._create_pg() + + @classmethod + def default_pg(cls): + assert cls.DEFAULT_PG is not None + return cls.DEFAULT_PG + + def size(self): + return self.pg_world_size + + def rank(self): + return self.sim_rank + + # ---------------------------------------------------- + # P2P + # + # P2P operations are simulated as all_reduce + # ---------------------------------------------------- + class P2PRequestObject: + """ Dummy p2p request object that is returned for p2p ops""" + + def __init__(self, src): + self.src = src + + def wait(self): + return + + def is_completed(self): + return True + + def _source_rank(self): + return self.src + + def _p2p_op(self, tensor_list, src=None): + opts = AllreduceOptions() + if self.torch_ver_minor > 10: + opts.reduceOp = ReduceOp.SUM + self.pg.allreduce(tensor_list, opts).wait() + src = src if src is not None else self.sim_rank + return SimProcessGroup.P2PRequestObject(src=src) + + def send(self, tensor_list, _group_dst_rank, _tag): + return self._p2p_op(tensor_list) + + def recv_anysource(self, tensor_list, _tag): + return self._p2p_op(tensor_list) + + def recv(self, tensor_list, src, _tag): + return self._p2p_op(tensor_list, src=src) + + # 
---------------------------------------------------- + # Collectives + # + # For some collectives, the input/output tensors_list must be + # shrunk to a single element (world_size=1). + # In addition, all other members of tensors_list must be made to depend + # on the first element to prevent incorrect graph signaling. + # The shrink-and-copy logic is handled by: + # - _adjust_tensors_list_to_ws1 + # - _copy_data_from_tensor_to_tensor_list + # ---------------------------------------------------- + @staticmethod + def _to_device(tensors, device): + if isinstance(tensors, dict): + return {k: SimProcessGroup._to_device(v, device) for k, v in tensors.items()} + elif isinstance(tensors, list): + return [SimProcessGroup._to_device(v, device) for v in tensors] + elif isinstance(tensors, torch.Tensor): + return tensors.to(device) + else: + assert False, 'Unsupported tensors type' + + def broadcast(self, tensors, opts): + """ ignore opts.rootRank and override to be the source """ + opts.rootRank = self.sim_rank + tensors = self._to_device(tensors, get_accelerator().current_device_name()) + return self.pg.broadcast(tensors, opts) + + def allreduce(self, tensors, opts): + return self.pg.allreduce(tensors, opts) + + def allreduce_coalesced(self, tensors, opts): + return self.pg.allreduce_coalesced(tensors, opts) + + def reduce(self, tensors, opts): + if opts.rootRank == self.sim_rank: + return self.pg.reduce(tensors, opts) + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + broadcast_opts.rootTensor = opts.rootTensor + return self.pg.broadcast(tensors, broadcast_opts) + + def _adjust_tensors_list_to_ws1(self, tensors_list): + """ Receives a list of lists of tensors and returns lists + of size 1 to match world_size=1 + """ + world1_tensors_list = [] + for i, tensors in enumerate(tensors_list): + world1_tensors_list.append(tensors[self.sim_rank:self.sim_rank + 1]) + return world1_tensors_list + + @staticmethod + def _copy_data_from_tensor_to_tensor_list(source_tensors, tensors_list): + """ copy data from source tensors to all tensors in tensor list """ + for i, tensors in enumerate(tensors_list): + for t in tensors: + t.data[:] = source_tensors[i][0].data[:] + + def allgather(self, tensors_list, input_tensors, *kwargs): + world1_tensors_list = self._adjust_tensors_list_to_ws1(tensors_list) + handle = self.pg.allgather(world1_tensors_list, input_tensors, *kwargs) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, tensors_list) + return handle + + def gather(self, output_tensors, input_tensors, opts): + if opts.rootRank == self.sim_rank: + world1_tensors_list = self._adjust_tensors_list_to_ws1(output_tensors) + handle = self.pg.gather(world1_tensors_list, input_tensors, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, output_tensors) + return handle + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + return self.pg.broadcast(input_tensors, broadcast_opts) + + def scatter(self, output_tensors, input_tensors, opts): + if opts.rootRank == self.sim_rank: + world1_tensors_list = self._adjust_tensors_list_to_ws1(input_tensors) + handle = self.pg.scatter(output_tensors, world1_tensors_list, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, input_tensors) + return handle + + broadcast_opts = BroadcastOptions() + broadcast_opts.rootRank = self.sim_rank + return self.pg.broadcast(output_tensors, broadcast_opts) + + def reduce_scatter(self, output_tensors, input_tensors, opts): +
world1_tensors_list = self._adjust_tensors_list_to_ws1(input_tensors) + handle = self.pg.reduce_scatter(output_tensors, world1_tensors_list, opts) + self._copy_data_from_tensor_to_tensor_list(world1_tensors_list, input_tensors) + return handle + + def alltoall(self, output_tensors, input_tensors, _opts): + world1_in_tensors_list = input_tensors[self.sim_rank:self.sim_rank + 1] + world1_out_tensors_list = output_tensors[self.sim_rank:self.sim_rank + 1] + world1_out_tensors_list[0].data[:] = world1_in_tensors_list[0].data[:] + opts = AllreduceOptions() + if self.torch_ver_minor > 10: + opts.reduceOp = ReduceOp.SUM + handle = self.pg.allreduce(world1_out_tensors_list, opts) + return handle + + def barrier(self, opts): + opts.device_ids = [self.sim_rank] + return self.pg.barrier(opts) + + # ---------------------------------------------------- + # Create group registered function + # ---------------------------------------------------- + @classmethod + def create(cls, _store, rank, world_size, timeout, backend): + return cls(rank, world_size, timeout, backend) + + +def install_sim_dist_backend(sim_world_size, sim_rank): + + def wrapped_dist_init_process_group(backend, + init_method=None, + timeout=default_pg_timeout, + world_size=-1, + rank=-1, + store=None, + group_name="", + pg_options=None): + assert world_size == -1 or world_size == sim_world_size, \ + f'Inconsistent world_size: sim={sim_world_size} dist_init={world_size}' + + assert rank == -1 or rank == sim_rank, \ + f'Inconsistent rank: sim={sim_rank} dist_init={rank}' + + if backend == 'hccl': + import habana_frameworks.torch.distributed.hccl # noqa: F401 + + # override provided init_method/store with a dummy store + # For debug, it is better to use FileStore: + # import os + # my_store_filename = '/tmp/my_store' + # os.remove(my_store_filename) if os.path.exists(my_store_filename) else None + # os.remove(my_store_filename) + # store = torch.distributed.FileStore(my_store_filename, world_size) + store = torch.distributed.TCPStore(host_name="localhost", + port=12355, + world_size=sim_world_size, + is_master=True, + timeout=timedelta(seconds=300), + wait_for_workers=False) + + # set the simulated world size + SimProcessGroup.WORLD_SIZE = sim_world_size + SimProcessGroup.STORE = store + + # register sim backend + # create_fn = partial(SimProcessGroup.create, backend=default_backend) + create_fn = partial(SimProcessGroup.create, backend=backend) + dist.Backend.register_backend(SimProcessGroup.BACKEND, create_fn) + + # emulate all other world devices has joined the newly created group + SimProcessGroup.store_add_rest_of_world(next_group=True) + + orig_dist_init_process_group(backend=SimProcessGroup.BACKEND, + timeout=timeout, + world_size=sim_world_size, + rank=sim_rank, + store=store, + group_name=group_name, + pg_options=pg_options) + + SimProcessGroup.default_pg().post_create_sim_group() + + def wrapped_dist_new_group(ranks=None, timeout=default_pg_timeout, backend=None, pg_options=None): + SimProcessGroup.store_add_rest_of_world(next_group=True) + pg = orig_dist_new_group(ranks=ranks, timeout=timeout, backend=backend, pg_options=pg_options) + + if pg != GroupMember.NON_GROUP_MEMBER: + if backend is None or backend == SimProcessGroup.BACKEND: + pg.post_create_sim_group() + + return pg + + def wrapped_dist_broadcast_object_list(object_list, src=0, group=None, device=None): + rank = SimProcessGroup.default_pg().sim_rank + if src != sim_rank: + raise RuntimeError(f'SimProcessGroup does not support dist.broadcast_object_list() ' + f'for 
src={src} different than sim_rank={rank}') + return orig_dist_broadcast_object_list(object_list, src, group, device) + + orig_dist_init_process_group = dist.init_process_group + dist.init_process_group = wrapped_dist_init_process_group + + orig_dist_new_group = dist.new_group + dist.new_group = wrapped_dist_new_group + + orig_dist_broadcast_object_list = dist.broadcast_object_list + dist.broadcast_object_list = wrapped_dist_broadcast_object_list diff --git a/deepspeed/tools/pg_sim/ut/base.py b/deepspeed/tools/pg_sim/ut/base.py new file mode 100644 index 000000000000..24889f944070 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/base.py @@ -0,0 +1,311 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +import functools +import torch +import torch.distributed as dist +import pytest + +from pg_sim.pg import (install_sim_dist_backend, GroupMember) + + +class TestBaseWrapper: + """ + BaseTestWrapper class ensures that the test cases encapsulated + in ProcessGroupSimTestBase will only be executed by subclasses. + """ + + class ProcessGroupSimTestBase(unittest.TestCase): + + def setUp(self) -> None: + self.world_size = 8 + self.rank = 0 + self.backend = self.get_backend() + self.device = self.get_device() + + self.assertIsNotNone(self.backend) + self.assertIsNotNone(self.device) + + install_sim_dist_backend(sim_world_size=self.world_size, sim_rank=self.rank) + + dist.init_process_group(backend=self.backend, + init_method=None, + store=None, + rank=self.rank, + world_size=self.world_size) + + def get_backend(self): + self.assertTrue(False, msg='get_backend must be implemented by derived test') + + def get_device(self): + self.assertTrue(False, msg='get_device must be implemented by derived test') + + def _get_row_first_rank(self): + row_ranks = list(set(range(self.world_size)) - {self.rank}) + return row_ranks[0] if row_ranks else None + + @staticmethod + def _get_torch_version(): + return int(torch.__version__.split('.')[1]) + + @pytest.mark.forked + def test_world(self): + res_rank = dist.get_rank() + res_ws = dist.get_world_size() + self.assertEqual(res_rank, self.rank) + self.assertEqual(res_ws, self.world_size) + + @pytest.mark.forked + def test_new_group(self): + t = torch.tensor([1, 2]).to(self.device) + t_in_out = t.clone() + + pg_1 = dist.new_group(ranks=[self.rank]) + dist.all_reduce(t_in_out, op=dist.ReduceOp.SUM, group=pg_1) + self.assertTrue(t.eq(t_in_out).all()) + + row_rank = self._get_row_first_rank() + if row_rank: + pg_2 = dist.new_group(ranks=[row_rank]) + self.assertEqual(pg_2, GroupMember.NON_GROUP_MEMBER) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_broadcast_impl(self, src): + t = torch.tensor([1, 2]).to(self.device) + handle = dist.broadcast(t, src=src, async_op=False) + self.assertIsNone(handle) + + t = torch.tensor([1, 2]).to(self.device) + handle = dist.broadcast(t, src=src, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_broadcast_src(self): + self._test_broadcast_impl(src=self.rank) + + @pytest.mark.forked + def test_broadcast_dst(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_broadcast_impl(src=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_broadcast_object_type_impl(self, src): + if dist.get_rank() == src: + objects = ["foo", 12, {1: 2}] + else: + objects = [None, None, None] + + dev = 
torch.device(self.device) + dist.broadcast_object_list(objects, src=src, device=dev) + + @pytest.mark.forked + def test_broadcast_object_type_src(self): + self._test_broadcast_object_type_impl(src=self.rank) + + @pytest.mark.forked + def test_broadcast_object_type_dst(self): + row_rank = self._get_row_first_rank() + if row_rank: + with pytest.raises(RuntimeError): + self._test_broadcast_object_type_impl(src=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_all_reduce(self): + t = torch.tensor([1, 2]).to(self.device) + t_in_out = t.clone() + dist.all_reduce(t_in_out, op=dist.ReduceOp.SUM) + self.assertTrue(t.eq(t_in_out).all()) + + def _test_reduce_impl(self, dst): + t = torch.tensor([1.0, 2.0]).to(self.device) + t_in_out = t.clone() + + handle = dist.reduce(t_in_out, dst=dst, op=dist.ReduceOp.SUM, async_op=False) + self.assertIsNone(handle) + self.assertTrue(t.eq(t_in_out).all()) + + handle = dist.reduce(t_in_out, dst=dst, op=dist.ReduceOp.SUM, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(t.eq(t_in_out).all()) + + @pytest.mark.forked + def test_reduce_src(self): + self._test_reduce_impl(dst=self.rank) + + @pytest.mark.forked + def test_reduce_dst(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_reduce_impl(dst=row_rank) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_all_gather(self): + tensor_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] + tensor = torch.ones(2).to(self.device) + + handle = dist.all_gather(tensor_list, tensor, async_op=False) + self.assertIsNone(handle) + self.assertTrue(tensor_list[0].eq(tensor).all()) + + handle = dist.all_gather(tensor_list, tensor, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(tensor_list[0].eq(tensor).all()) + + def _test_gather_impl(self, dst, local_dst): + torch_version = self._get_torch_version() + if (self.backend == 'nccl') and (torch_version <= 10): + self.skipTest(f'Skipping {self._testMethodName} for nccl ' + f'for torch.version={torch_version}') + + tensor = torch.ones(2).to(self.device) + gather_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] if local_dst else None + + handle = dist.gather(tensor, gather_list, dst=dst, async_op=False) + self.assertIsNone(handle) + if local_dst: + self.assertTrue(gather_list[dst].eq(tensor).all()) + + handle = dist.gather(tensor, gather_list, dst=dst, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + if local_dst: + self.assertTrue(gather_list[dst].eq(tensor).all()) + + @pytest.mark.forked + def test_gather_src(self): + self._test_gather_impl(dst=self.rank, local_dst=True) + + @pytest.mark.forked + def test_gather_not_src(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_gather_impl(dst=row_rank, local_dst=False) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + def _test_scatter_impl(self, src, local_src): + if self.backend not in ('gloo', 'mpi'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + tensor = torch.ones(2).to(self.device) + scatter_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] if local_src else None + + handle = dist.scatter(tensor, scatter_list, src=src, async_op=False) + self.assertIsNone(handle) + if local_src: + self.assertTrue(scatter_list[src].eq(tensor).all()) + + handle = dist.scatter(tensor, scatter_list, src=src, async_op=True) + 
self.assertIsNotNone(handle) + handle.wait() + if local_src: + self.assertTrue(scatter_list[src].eq(tensor).all()) + + @pytest.mark.forked + def test_scatter_src(self): + self._test_scatter_impl(src=self.rank, local_src=True) + + @pytest.mark.forked + def test_scatter_not_src(self): + row_rank = self._get_row_first_rank() + if row_rank: + self._test_scatter_impl(src=row_rank, local_src=False) + else: + self.skipTest(f'Skipping {self._testMethodName}') + + @pytest.mark.forked + def test_reduce_scatter(self): + if self.backend not in ('nccl', 'hccl'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + output = torch.ones(2).to(self.device) + input_list = [torch.zeros(2).to(self.device) for _ in range(self.world_size)] + + handle = dist.reduce_scatter(output, input_list, async_op=False) + self.assertIsNone(handle) + self.assertTrue(input_list[self.rank].eq(output).all()) + + handle = dist.reduce_scatter(output, input_list, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue(input_list[self.rank].eq(output).all()) + + @pytest.mark.forked + def test_all_to_all(self): + if self.backend not in ('nccl', 'hccl', 'mpi'): + self.skipTest(f'Skipping {self._testMethodName} for {self.backend}') + + output_list = [torch.zeros(1).to(self.device) for _ in range(self.world_size)] + input_list = list( + torch.arange(self.world_size, dtype=torch.float32).add(1.).to(self.device).chunk(self.world_size)) + + expected_res = [ + torch.zeros(1).to(self.device) if i != self.rank else torch.ones(1).to(self.device) + for i in range(self.world_size) + ] + + handle = dist.all_to_all(output_list, input_list, async_op=False) + self.assertIsNone(handle) + self.assertTrue( + functools.reduce(lambda x, y: x and y, map(lambda p, q: p == q, expected_res, output_list), True)) + + handle = dist.all_to_all(output_list, input_list, async_op=True) + self.assertIsNotNone(handle) + handle.wait() + self.assertTrue( + functools.reduce(lambda x, y: x and y, map(lambda p, q: p == q, expected_res, output_list), True)) + + @pytest.mark.forked + def test_barrier(self): + handle = dist.barrier(async_op=False) + self.assertIsNone(handle) + + handle = dist.barrier(async_op=True) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_p2p_send(self): + tensor = torch.ones(2).to(self.device) + dist.send(tensor, dst=self.rank, group=None, tag=0) + + row_rank = self._get_row_first_rank() + dist.send(tensor, dst=row_rank, group=None, tag=0) if row_rank else None + + handle = dist.isend(tensor, dst=self.rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + handle = dist.isend(tensor, dst=row_rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + @pytest.mark.forked + def test_p2p_recv(self): + tensor = torch.zeros(2).to(self.device) + dist.recv(tensor, src=self.rank, group=None, tag=0) + + row_rank = self._get_row_first_rank() + dist.recv(tensor, src=row_rank, group=None, tag=0) if row_rank else None + + handle = dist.irecv(tensor, src=self.rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() + + handle = dist.irecv(tensor, src=row_rank, group=None, tag=0) + self.assertIsNotNone(handle) + handle.wait() diff --git a/deepspeed/tools/pg_sim/ut/gloo.py b/deepspeed/tools/pg_sim/ut/gloo.py new file mode 100644 index 000000000000..347f9b96a741 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/gloo.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
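(Not part of the patch.) A minimal sketch of how the simulation backend above is meant to be driven: install it before dist.init_process_group, after which collectives execute on a single process while reporting the simulated world size. The import path follows the new deepspeed/tools/pg_sim package; everything else is illustrative.

import torch
import torch.distributed as dist
from deepspeed.tools.pg_sim import install_sim_dist_backend

# Pretend to be rank 0 of an 8-rank job while running a single process.
install_sim_dist_backend(sim_world_size=8, sim_rank=0)
dist.init_process_group(backend='gloo', rank=0, world_size=8)

t = torch.ones(2)
dist.all_reduce(t)               # executed locally; data unchanged, since only one real rank exists
print(dist.get_world_size(), t)  # -> 8  tensor([1., 1.])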
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestGloo(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestGloo, self).setUp() + + def get_backend(self): + return 'gloo' + + def get_device(self): + return 'cpu' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/pg_sim/ut/hccl.py b/deepspeed/tools/pg_sim/ut/hccl.py new file mode 100644 index 000000000000..37fabcfb9363 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/hccl.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestHccl(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestHccl, self).setUp() + + def get_backend(self): + return 'hccl' + + def get_device(self): + return 'hpu' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/pg_sim/ut/nccl.py b/deepspeed/tools/pg_sim/ut/nccl.py new file mode 100644 index 000000000000..da6b8f23ed47 --- /dev/null +++ b/deepspeed/tools/pg_sim/ut/nccl.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import unittest +from pg_sim.ut.base import TestBaseWrapper + + +class ProcessGroupSimTestNccl(TestBaseWrapper.ProcessGroupSimTestBase): + + def setUp(self) -> None: + super(ProcessGroupSimTestNccl, self).setUp() + + def get_backend(self): + return 'nccl' + + def get_device(self): + return 'cuda' + + +if __name__ == '__main__': + unittest.main() diff --git a/deepspeed/tools/tensor_logger/__init__.py b/deepspeed/tools/tensor_logger/__init__.py new file mode 100644 index 000000000000..55ea7227e48d --- /dev/null +++ b/deepspeed/tools/tensor_logger/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .tensor_logger import TensorLogger, save_logged_tensors diff --git a/deepspeed/tools/tensor_logger/tensor_logger.py b/deepspeed/tools/tensor_logger/tensor_logger.py new file mode 100644 index 000000000000..f128b9a6b0a0 --- /dev/null +++ b/deepspeed/tools/tensor_logger/tensor_logger.py @@ -0,0 +1,234 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import collections +from functools import partial +from contextlib import contextmanager +from torch.functional import Tensor +from os import makedirs +from os.path import join + + +class TensorLogger: + """ Records nn.Module's activations and gradients tensors + + Description: + Records up to end_iteration(if 0, recording is disabled) from start_iteration. + If log_activations_enabled, nn.Module's activations are recorded during forward. + If log_grads_enabled, nn.Module's gradients are recorded during back propagation. + If log_inputs_enabled, model inputs are recorded. 
+ + Usage: + Integrated within the training loop: + tensor_logger = TensorLogger(model, start_iteration=2, end_iteration=2) + # dumps second iteration only, iteration number starts with 1 + + for i, samples in enumerate(data_loader) # training loop + with tensor_logger.log_iteration(i): + # run forward/backward iteration + + tensor_logger.save(filename) + + Another alternative: + tensor_logger = TensorLogger(model, end_iteration=2) + + for i, samples in enumerate(data_loader) # training loop + with tensor_logger: + tensor_logger.set_iteration(i) + # run forward/backward iteration + + tensor_logger.save(filename) + + Implementation notes: + forward/backward activations/gradients are collected using nn.Module hooks. + However, model inputs are collected by overloading model.forward() method. + Model inputs can't be collected using the hooks since the hooks only provide + inputs and do not provide kwargs, if exist, of the forward method. + """ + + def __init__(self, + model, + start_iteration=0, + end_iteration=0, + log_activations_enabled=False, + log_grads_enabled=False, + log_inputs_enabled=False, + prefix=None): + + # for now, no support for virtual pipeline (interleaved) + if isinstance(model, list): + assert len(model) == 1, 'No support for list of multiple models (len={})'.format(len(model)) + model = model[0] + + self.model = model + self.start_iteration = start_iteration + self.end_iteration = end_iteration + self.log_activations_enabled = log_activations_enabled + self.log_grads_enabled = log_grads_enabled + self.log_inputs_enabled = log_inputs_enabled + self.prefix = 'model' if prefix is None else prefix + + # captured tensors are saved in the following hierarchy: + # { + # iteration: { # iteration number + # tensor_type: { # fwd_act/bwd_grad_in/bwd_grad_out + # name: [tensors] # tensor name's tensors. list is required due to e.g. 
grad accumulation + # } + # } + # } + class IterData(dict): + + def __init__(self): + super(IterData, self).__init__() + self['fwd_act'] = collections.defaultdict(list) + self['bwd_grad_in'] = collections.defaultdict(list) + self['bwd_grad_out'] = collections.defaultdict(list) + self['model_inputs'] = collections.defaultdict(list) + + self.data = collections.defaultdict(IterData) + self.active = False + self.current_iteration = 0 + self.fwd_handles = [] + self.bwd_handles = [] + + def _fqn(self, name): + return '.'.join([self.prefix, name]) if name else self.prefix + + def set_iteration(self, iteration): + self.current_iteration = iteration + + def get_num_recorded_iterations(self): + return len(self.data) + + @contextmanager + def log_iteration(self, iteration): + self.current_iteration = iteration + self._enable() + yield self + self._disable() + + def __enter__(self): + self._enable() + return self + + def __exit__(self): + self._disable() + + def clear(self): + self.data.clear() + + def save(self, filename, do_clear=True): + + def convert_for_pickle(obj): + if isinstance(obj, dict): + return {k: convert_for_pickle(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [convert_for_pickle(e) for e in obj] + elif isinstance(obj, tuple): + return tuple([convert_for_pickle(e) for e in obj]) + else: + if isinstance(obj, Tensor): + return obj.detach().cpu() + else: + return obj + + data = convert_for_pickle(self.data) + torch.save(data, filename) + self.clear() if do_clear else None + + def _enable(self): + if not self.active and self.start_iteration <= self.current_iteration <= self.end_iteration: + self.active = True + self._enable_log_grads() if self.log_grads_enabled else None + self._enable_log_activations() if self.log_activations_enabled else None + self._enable_log_inputs() if self.log_inputs_enabled else None + + def _disable(self): + if self.active: + self.active = False + self._disable_log_grads() + self._disable_log_activations() + self._disable_log_inputs() + + @staticmethod + def _extract_tensors(t): + if t is None: + return None + elif isinstance(t, int): + return torch.tensor(t) + elif isinstance(t, torch.Tensor): + return t.detach().contiguous() + elif isinstance(t, list): + return [TensorLogger._extract_tensors(e) for e in t] + elif isinstance(t, tuple): + return tuple(TensorLogger._extract_tensors(e) for e in t) + elif isinstance(t, dict): + return {k: TensorLogger._extract_tensors(v) for k, v in t.items()} + assert False, 'Unsupported type: {}'.format(type(t)) + + def _save_fwd_activation(self, name, _mod, _inp, out): + fwd_act = self._extract_tensors(out) + self.data[self.current_iteration]['fwd_act'][name].append(fwd_act) + + def _save_bwd_grads(self, name, _mod, grad_input, grad_output): + grad_in = self._extract_tensors(grad_input) + grad_out = self._extract_tensors(grad_output) + self.data[self.current_iteration]['bwd_grad_in'][name].append(grad_in) + self.data[self.current_iteration]['bwd_grad_out'][name].append(grad_out) + + def _save_inputs(self, *inp, **kwargs): + model_inputs = self._extract_tensors(inp) + model_kwargs = self._extract_tensors(kwargs) + self.data[self.current_iteration]['model_inputs']['inputs'].append(model_inputs) + self.data[self.current_iteration]['model_inputs']['kwargs'].append(model_kwargs) + + def _enable_log_grads(self): + #Revert after [SW-69765] is fixed + full_bwd_hook_supported = False + for name, m in self.model.named_modules(): + register_fn = m.register_full_backward_hook if full_bwd_hook_supported else 
m.register_backward_hook + h = register_fn(partial(self._save_bwd_grads, self._fqn(name))) + self.bwd_handles.append(h) + + def _enable_log_activations(self): + for name, m in self.model.named_modules(): + h = m.register_forward_hook(partial(self._save_fwd_activation, self._fqn(name))) + self.fwd_handles.append(h) + + def _enable_log_inputs(self): + + def wrapped_forward(*inputs, **kwargs): + self._save_inputs(*inputs, **kwargs) + return self.model.original_forward__(*inputs, **kwargs) + + self.model.original_forward__ = self.model.forward + self.model.forward = wrapped_forward + + def _disable_log_grads(self): + for h in self.bwd_handles: + h.remove() + self.bwd_handles = [] + + def _disable_log_activations(self): + for h in self.fwd_handles: + h.remove() + self.fwd_handles = [] + + def _disable_log_inputs(self): + if hasattr(self.model, 'original_forward__'): + self.model.forward = self.model.original_forward__ + del self.model.original_forward__ + + +def save_logged_tensors(tensor_logger: TensorLogger, tensor_logger_path, rank_no, iteration=None): + if tensor_logger.get_num_recorded_iterations(): + makedirs(tensor_logger_path, exist_ok=True) + filename = 'tensor_logger_rank_{}'.format(rank_no) + '.pt' + if iteration is not None: + filename = 'tensor_logger_rank_{}_iter_{}'.format(rank_no, iteration) + '.pt' + fullname = join(tensor_logger_path, filename) + tensor_logger.save(fullname) diff --git a/deepspeed/utils/__init__.py b/deepspeed/utils/__init__.py index 33ea8ba60818..75fb6aa9d30a 100644 --- a/deepspeed/utils/__init__.py +++ b/deepspeed/utils/__init__.py @@ -10,7 +10,7 @@ from .groups import * from .nvtx import instrument_w_nvtx # TODO: Move tensor fragment and mixed precision to zero utils -from .tensor_fragment import tensor_fragment, get_full_hp_param, get_hp_fragment_mapping, fragment_address, get_full_hp_grad +from .tensor_fragment import tensor_fragment, get_full_hp_param, get_hp_fragment_mapping, fragment_address, get_full_hp_grad, map_to_flat_opt_states from .tensor_fragment import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state from .tensor_fragment import set_full_hp_param from .tensor_fragment import safe_set_full_fp32_param, safe_set_full_optimizer_state diff --git a/deepspeed/utils/bwc.py b/deepspeed/utils/bwc.py new file mode 100644 index 000000000000..69fcc251a684 --- /dev/null +++ b/deepspeed/utils/bwc.py @@ -0,0 +1,104 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + + +def bwc_tensor_model_parallel_rank(mpu=None): + """Backwards-compatible way of querying the tensor model parallel rank from + an ``mpu`` object. + + *Tensor* model parallelism means that tensors are physically split across + processes. This contrasts with *pipeline* model parallelism, in which the + layers are partitioned but tensors left intact. + + The API for tensor model parallelism has changed across versions and this + helper provides a best-effort implementation across versions of ``mpu`` + objects. The preferred mechanism is + ``mpu.get_tensor_model_parallel_rank()``. + + This should "just work" with both Megatron-LM and DeepSpeed's pipeline + parallelism. + + Args: + mpu (model parallel unit, optional): The tensor model parallel rank. + If ``mpu=None``, returns 0. Defaults to ``None``. 
+ + Returns: + int: the rank + """ + if mpu is None: + # No model parallelism in easy :) + return 0 + + if hasattr(mpu, 'get_tensor_model_parallel_rank'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + return mpu.get_tensor_model_parallel_rank() + elif hasattr(mpu, 'get_slice_parallel_rank'): + # Some DeepSpeed + pipeline parallelism versions + return mpu.get_slice_parallel_rank() + else: + # Deprecated Megatron and DeepSpeed convention + return mpu.get_model_parallel_rank() + + +def bwc_tensor_model_parallel_world_size(mpu=None): + """Backwards-compatible way of querying the tensor model parallel world size. + Similar to bwc_tensor_model_parallel_rank. + """ + if mpu is None: + return 1 + + if hasattr(mpu, 'get_tensor_model_parallel_world_size'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + return mpu.get_tensor_model_parallel_world_size() + elif hasattr(mpu, 'get_slice_parallel_world_size'): + # Some DeepSpeed + pipeline parallelism versions + return mpu.get_slice_parallel_world_size() + else: + # Deprecated Megatron and DeepSpeed convention + return mpu.get_model_parallel_world_size() + + +def bwc_tensor_model_parallel_group(mpu=None): + """Backwards-compatible way of querying the tensor model parallel group. + Similar to bwc_tensor_model_parallel_rank. + """ + if mpu is None: + return None + + if hasattr(mpu, 'get_tensor_model_parallel_group'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + return mpu.get_tensor_model_parallel_group() + elif hasattr(mpu, 'get_slice_parallel_group'): + # Some DeepSpeed + pipeline parallelism versions + return mpu.get_slice_parallel_group() + else: + # Deprecated Megatron and DeepSpeed convention + return mpu.get_model_parallel_group() + + +def bwc_pipeline_parallel_world_size(mpu=None): + """Backwards-compatible way of querying the pipeline parallel world size.""" + world_size = 1 + if mpu is not None: + if hasattr(mpu, 'get_pipeline_model_parallel_world_size'): + # New Megatron and DeepSpeed convention (post pipeline-parallelism release) + world_size = mpu.get_pipeline_model_parallel_world_size() + elif hasattr(mpu, 'get_pipe_parallel_world_size'): + # DeepSpeed Topology + world_size = mpu.get_pipe_parallel_world_size() + return world_size + + +def bwc_pipeline_parallel_group(mpu=None): + """Backwards-compatible way of querying the pipeline parallel group.""" + if mpu is None: + return None + if hasattr(mpu, 'get_pipeline_model_parallel_group'): + # Megatron + return mpu.get_pipeline_model_parallel_group() + elif hasattr(mpu, 'get_pipe_parallel_group'): + # DeepSpeed Topology + return mpu.get_pipe_parallel_group() + assert False, 'mpu does not support pipeline parallel group' diff --git a/deepspeed/utils/config.py b/deepspeed/utils/config.py new file mode 100644 index 000000000000..15f37ca7d874 --- /dev/null +++ b/deepspeed/utils/config.py @@ -0,0 +1,46 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel + +######################################### +# Timers +######################################### +# Timers. By default, timers are enabled. 
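(Not part of the patch.) A small sketch of how the throughput-timer section of ds_config.json maps onto the config model defined just below; the dict literal stands in for a parsed user config.

from deepspeed.utils.config import get_timers_config

ds_config = {"timers": {"throughput": {"enabled": True, "synchronized": False}}}
cfg = get_timers_config(ds_config)
assert cfg.enabled is True and cfg.synchronized is False  # fields of DeepSpeedThroughputTimerConfig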
+# Users can configure in ds_config.json as below example: +TIMERS_FORMAT = ''' +Timers should be enabled as: +"timers": { + "throughput": { + "enabled": true, + "synchronized": true + } +} +''' + +TIMERS = "timers" +TIMERS_THROUGHPUT = "throughput" + + +def get_timers_config(param_dict): + if param_dict and TIMERS in param_dict and TIMERS_THROUGHPUT in param_dict[TIMERS]: + timers_config_dict = param_dict[TIMERS][TIMERS_THROUGHPUT] + else: + timers_config_dict = {} + return DeepSpeedThroughputTimerConfig(**timers_config_dict) + + +class DeepSpeedThroughputTimerConfig(DeepSpeedConfigModel): + """ Configure throughput timers """ + + enabled: bool = True + """ Turn on/off throughput timers """ + + synchronized: bool = True + """ Whether to synchronize a device when measuring the time. + Synchronizing a device is required to produce the most accurate timer measurements. + However, this comes at the expense of performance degradation. The CPU timer provides + sufficient accuracy in many cases. + """ diff --git a/deepspeed/utils/groups.py b/deepspeed/utils/groups.py index 63dda7f5aaae..381c1f895652 100644 --- a/deepspeed/utils/groups.py +++ b/deepspeed/utils/groups.py @@ -27,12 +27,15 @@ from deepspeed import comm as dist from deepspeed.utils import log_dist +from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size, bwc_pipeline_parallel_world_size from deepspeed.utils.exceptions import DeprecatedException from deepspeed.accelerator import get_accelerator # Expert parallel group that the current rank belongs to. _EXPERT_PARALLEL_GROUP = {} # Expert data parallel group that the current rank belongs to. _EXPERT_DATA_PARALLEL_GROUP = {} +# Expert Parallel combined with Tensor Parallel that the current rank belongs to. +_EXPERT_TENSOR_PARALLEL_GROUP = {} # dist world group needs to be cloned for some cases _WORLD_GROUP = None # ZeRO parameter partitioning group that the current rank belongs to. @@ -128,31 +131,32 @@ def _create_expert_and_data_parallel(expert_parallel_size_, use_data_before_expe log_dist(f'Creating expert and data parallel groups with size {expert_parallel_size_}', ranks=[0]) world_size = dist.get_world_size() + pp_world_size = 1 if mpu is None else bwc_pipeline_parallel_world_size(mpu) rank = dist.get_rank() - _ensure_divisibility(world_size, expert_parallel_size_) + pp_stride = world_size // pp_world_size + _ensure_divisibility(pp_stride, expert_parallel_size_) group_name = f"ep_size_{expert_parallel_size_}" # Build the expert data parallel groups. 
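(Not part of the patch.) The groups changes in this hunk lean on the new deepspeed/utils/bwc.py helpers imported above; a tiny illustration of their fallback order, using a hypothetical old-style mpu stub.

from deepspeed.utils.bwc import bwc_tensor_model_parallel_world_size, bwc_pipeline_parallel_world_size

class OldStyleMPU:
    # Only the deprecated Megatron-style accessor is provided.
    def get_model_parallel_world_size(self):
        return 4

assert bwc_tensor_model_parallel_world_size(None) == 1           # no mpu -> no tensor parallelism
assert bwc_tensor_model_parallel_world_size(OldStyleMPU()) == 4  # falls back to the deprecated API
assert bwc_pipeline_parallel_world_size(OldStyleMPU()) == 1      # no pipeline accessor -> world size 1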
global _EXPERT_DATA_PARALLEL_GROUP - ep_stride = world_size // expert_parallel_size_ + ep_stride = pp_stride // expert_parallel_size_ # Only create group if it does not already exist if group_name not in _EXPERT_DATA_PARALLEL_GROUP: - for i in range(expert_parallel_size_): - if use_data_before_expert_parallel_: - ranks = range(i * ep_stride, (i + 1) * ep_stride) - else: - ranks = range(i, world_size, expert_parallel_size_) - group = dist.new_group(ranks) - log_dist(f'Creating expert data parallel process group named {group_name} with ranks: {list(ranks)}', [0]) - if use_data_before_expert_parallel_: - if i == (rank // ep_stride): - _EXPERT_DATA_PARALLEL_GROUP[group_name] = group - else: - if i == (rank % expert_parallel_size_): + for pp_stage_start in range(0, world_size, pp_stride): + for i in range(expert_parallel_size_): + if use_data_before_expert_parallel_: + ranks = range(pp_stage_start + i * ep_stride, pp_stage_start + (i + 1) * ep_stride) + else: + ranks = range(pp_stage_start + i, pp_stage_start + pp_stride, expert_parallel_size_) + group = dist.new_group(ranks) + log_dist( + f'Creating expert data parallel process group named {group_name} ' + f'with ranks: {list(ranks)}', [0]) + if rank in ranks: _EXPERT_DATA_PARALLEL_GROUP[group_name] = group # Build the expert parallel groups. @@ -161,24 +165,29 @@ def _create_expert_and_data_parallel(expert_parallel_size_, use_data_before_expe # Only create group if it does not already exist if group_name not in _EXPERT_PARALLEL_GROUP: if use_data_before_expert_parallel_: - for i in range(ep_stride): - ranks = range(i, world_size, ep_stride) - group = dist.new_group(ranks) - log_dist(f'creating expert parallel process group named {group_name} with ranks: {list(ranks)}', [0]) - if i == (rank % ep_stride): - _EXPERT_PARALLEL_GROUP[group_name] = group + for pp_stage_start in range(0, world_size, pp_stride): + for i in range(ep_stride): + ranks = range(pp_stage_start + i, pp_stage_start + pp_stride, ep_stride) + group = dist.new_group(ranks) + log_dist( + f'creating expert parallel process group named {group_name} ' + f'with ranks: {list(ranks)}', [0]) + if rank in ranks: + _EXPERT_PARALLEL_GROUP[group_name] = group else: for i in range(world_size // expert_parallel_size_): ranks = range(i * expert_parallel_size_, (i + 1) * expert_parallel_size_) group = dist.new_group(ranks) - log_dist(f'creating expert parallel process group named {group_name} with ranks: {list(ranks)}', [0]) - if i == (rank // expert_parallel_size_): + log_dist(f'creating expert parallel process group named {group_name} ' + f'with ranks: {list(ranks)}', [0]) + if rank in ranks: _EXPERT_PARALLEL_GROUP[group_name] = group def _get_expert_parallel_ranks(world_size, - model_parallel_size_, + tensor_parallel_size_, expert_parallel_size_, + pipeline_parallel_size_=1, use_data_before_expert_parallel_=False): """Generate expert parallel and expert data parallel group ranks list. @@ -193,32 +202,40 @@ def _get_expert_parallel_ranks(world_size, Args: world_size (int): Distributed world size. - model_parallel_size_ (int): Model parallel group size. + tensor_parallel_size_ (int): Tensor parallel group size. expert_parallel_size_ (int): Expert parallel group size. + pipeline_parallel_size_ (int): Pipeline parallel group size use_data_before_expert_parallel_ (bool): Use the D + E instead of E + D topology Returns: Expert parallel group ranks and Expert data parallel group ranks list. 
""" - _ensure_divisibility(world_size, model_parallel_size_) - dp_world_size = world_size // model_parallel_size_ + _ensure_divisibility(world_size, tensor_parallel_size_ * pipeline_parallel_size_) + dp_world_size = world_size // (tensor_parallel_size_ * pipeline_parallel_size_) _ensure_divisibility(dp_world_size, expert_parallel_size_) # Generate data parallel groups data_parallel_groups = [] - dp_group_size = model_parallel_size_ + dp_group_size = tensor_parallel_size_ + pp_stride = world_size // pipeline_parallel_size_ if use_data_before_expert_parallel_: - dp_stride = world_size // expert_parallel_size_ // model_parallel_size_ - for i in range(dp_group_size): - data_parallel_groups.append(list()) - for ds in range(dp_stride): - # [0, 4, 8, 12, 16, 20, 24, 28, 2, 6, 10, 14, 18, 22, 26, 30] - # [1, 5, 9, 13, 17, 21, 25, 29, 3, 7, 11, 15, 19, 23, 27, 31] - data_parallel_groups[-1].extend( - list(range(i + ds * model_parallel_size_, world_size, dp_stride * model_parallel_size_))) + dp_stride = world_size // expert_parallel_size_ // tensor_parallel_size_ // pipeline_parallel_size_ + for pp_stage_start in range(0, world_size, pp_stride): + pp_stage_next = pp_stage_start + pp_stride + for i in range(dp_group_size): + data_parallel_groups.append(list()) + for ds in range(dp_stride): + # [0, 4, 8, 12, 16, 20, 24, 28, 2, 6, 10, 14, 18, 22, 26, 30] + # [1, 5, 9, 13, 17, 21, 25, 29, 3, 7, 11, 15, 19, 23, 27, 31] + data_parallel_groups[-1].extend( + list( + range(pp_stage_start + i + ds * tensor_parallel_size_, pp_stage_next, + dp_stride * tensor_parallel_size_))) else: - for i in range(dp_group_size): - data_parallel_groups.append(list(range(i, world_size, dp_group_size))) + for pp_stage_start in range(0, world_size, pp_stride): + pp_stage_next = pp_stage_start + pp_stride + for i in range(dp_group_size): + data_parallel_groups.append(list(range(pp_stage_start + i, pp_stage_next, dp_group_size))) expert_parallel_groups = [] expert_data_parallel_groups = [] @@ -236,7 +253,36 @@ def _get_expert_parallel_ranks(world_size, return expert_parallel_groups, expert_data_parallel_groups -def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu, use_data_before_expert_parallel_=False): +def _get_expert_tensor_parallel_ranks(expert_parallel_groups): + # create a dict from each rank to the ep_group ranks it belongs to + rank_to_ep_group = {} + for ranks in expert_parallel_groups: + for rank in ranks: + rank_to_ep_group[rank] = ranks + + # go over all tensor groups, rank by rank + # for each rank, add the ep_ranks to current tensor group, if not already added + # in order to add ep ranks only once, we delete all rank members from rank_to_ep_group + global expert_tensor_parallel_world_size + world_size = dist.get_world_size() + expert_tensor_parallel_groups = [] + for i in range(world_size // expert_tensor_parallel_world_size): + ep_tp_ranks = [] + for t in range(expert_tensor_parallel_world_size): + rank = i * expert_tensor_parallel_world_size + t + ep_ranks = rank_to_ep_group.get(rank, []) + for r in ep_ranks: + rank_to_ep_group.pop(r) + ep_tp_ranks.extend(ep_ranks) + if ep_tp_ranks: + expert_tensor_parallel_groups.append(sorted(ep_tp_ranks)) + return expert_tensor_parallel_groups + + +def _create_expert_data_and_model_parallel(expert_parallel_size_, + mpu, + use_data_before_expert_parallel_=False, + create_expert_tensor_parallel_group=False): """ Create expert and data parallel groups based on MPU (model parallel) group. 
@@ -252,36 +298,33 @@ def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu, use_data_ expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] """ assert dist.is_initialized(), "dist is not initialized" - model_parallel_size_ = mpu.get_model_parallel_world_size() + tensor_parallel_size_ = bwc_tensor_model_parallel_world_size(mpu) global expert_tensor_parallel_world_size - expert_tensor_parallel_world_size = model_parallel_size_ + expert_tensor_parallel_world_size = tensor_parallel_size_ world_size = dist.get_world_size() rank = dist.get_rank() dp_world_size = mpu.get_data_parallel_world_size() - dp_rank = mpu.get_data_parallel_rank() + pp_world_size = 1 if mpu is None else bwc_pipeline_parallel_world_size(mpu) - _ensure_divisibility(world_size, model_parallel_size_) + _ensure_divisibility(world_size, tensor_parallel_size_) _ensure_divisibility(dp_world_size, expert_parallel_size_) log_dist( - f"Creating deepspeed groups with model parallel size {model_parallel_size_}, expert parallel size {expert_parallel_size_}, world size {world_size}, dp world size {dp_world_size}", - [0]) + f"Creating deepspeed groups with model parallel size {tensor_parallel_size_}, " + f"pipeline parallel size {pp_world_size}, expert parallel size {expert_parallel_size_}, " + f"world size {world_size}, dp world size {dp_world_size}", [0]) global _EXPERT_PARALLEL_GROUP, _EXPERT_DATA_PARALLEL_GROUP - # Get world size and rank. Ensure some consistencies. - _DATA_PARALLEL_GROUP = mpu.get_data_parallel_group() - _MODEL_PARALLEL_GROUP = mpu.get_model_parallel_group() - group_name = f"ep_size_{expert_parallel_size_}" # Only create groups if they don't already exist # Need to check conditions outside the group creation loop because of the way torch.dist group creation works if group_name not in _EXPERT_DATA_PARALLEL_GROUP and group_name not in _EXPERT_PARALLEL_GROUP: expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks( - world_size, model_parallel_size_, expert_parallel_size_, use_data_before_expert_parallel_) + world_size, tensor_parallel_size_, expert_parallel_size_, pp_world_size, use_data_before_expert_parallel_) for ranks in expert_parallel_groups: group = dist.new_group(ranks) if rank in list(ranks): @@ -292,6 +335,18 @@ def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu, use_data_ if rank in list(ranks): _EXPERT_DATA_PARALLEL_GROUP[group_name] = group + if create_expert_tensor_parallel_group: + # calculate ep_tp_groups and validate correct number of groups + expert_tensor_parallel_groups = _get_expert_tensor_parallel_ranks(expert_parallel_groups) + n_ep_tp_groups = world_size // expert_parallel_size_ // expert_tensor_parallel_world_size + assert n_ep_tp_groups == len(expert_tensor_parallel_groups) + + # create groups + for ranks in expert_tensor_parallel_groups: + group = dist.new_group(ranks) + if rank in list(ranks): + _EXPERT_TENSOR_PARALLEL_GROUP[group_name] = group + def _get_max_expert_size(): """Get the maximum ep_size from all the created groups.""" @@ -338,6 +393,18 @@ def _get_expert_data_parallel_group_dict(): return _EXPERT_DATA_PARALLEL_GROUP +def _get_expert_tensor_parallel_group(group_name): + """Get the expert tensor parallel group the caller rank belongs to.""" + assert group_name in _EXPERT_TENSOR_PARALLEL_GROUP, \ + f'expert tensor parallel group is not initialized for {group_name=}' + return _EXPERT_TENSOR_PARALLEL_GROUP[group_name] + + +def _get_expert_tensor_parallel_group_dict(): + """Get the expert 
tensor parallel group dict.""" + return _EXPERT_TENSOR_PARALLEL_GROUP + + def _clone_world_group(): """Create a clone of the world group Note: We need to clone the dist world group because we diff --git a/deepspeed/utils/tensor_fragment.py b/deepspeed/utils/tensor_fragment.py index 49eefafcfbcc..b34722580ddd 100644 --- a/deepspeed/utils/tensor_fragment.py +++ b/deepspeed/utils/tensor_fragment.py @@ -58,6 +58,21 @@ def get_hp_fragment(self, optim_state_key=None): return self.get_optim_state_fragment(optim_state_key) +def map_to_flat_opt_states(flat_hp_tensor, lp_tensors, optim_state, opt_keys): + for key in opt_keys: + hp_param = flat_hp_tensor + buffer = torch.zeros_like(hp_param) + + for lp in lp_tensors: + if lp._hp_mapping is not None: + hp_fragment_address = lp._hp_mapping.get_hp_fragment_address() + hp_fragment = buffer.narrow(0, hp_fragment_address.start, hp_fragment_address.numel) + hp_fragment.data.copy_(lp._hp_mapping.get_hp_fragment(optim_state_key=key).data) + lp._hp_mapping.hp_fragment = hp_fragment + + optim_state[hp_param][key] = buffer + + def get_full_hp_param(self, optim_state_key=None): reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten() if self._hp_mapping is not None: diff --git a/deepspeed/utils/timer.py b/deepspeed/utils/timer.py index 11ef54fe4665..dd78b207cc37 100755 --- a/deepspeed/utils/timer.py +++ b/deepspeed/utils/timer.py @@ -197,15 +197,9 @@ def get_mean(self, names, normalizer=1.0, reset=True): class ThroughputTimer: - def __init__( - self, - batch_size, - start_step=2, - steps_per_output=50, - monitor_memory=False, - logging_fn=None, - ): + def __init__(self, config, batch_size, start_step=2, steps_per_output=50, monitor_memory=False, logging_fn=None): from deepspeed.utils import logger + self.config = config self.start_time = 0 self.end_time = 0 self.started = False @@ -234,14 +228,17 @@ def _init_timer(self): self.initialized = True def start(self): + if not self.config.enabled: + return self._init_timer() self.started = True if self.global_step_count >= self.start_step: - get_accelerator().synchronize() + if self.config.synchronized: + get_accelerator().synchronize() self.start_time = time.time() def stop(self, global_step=False, report_speed=True): - if not self.started: + if not self.config.enabled or not self.started: return self.started = False self.micro_step_count += 1 @@ -249,7 +246,8 @@ def stop(self, global_step=False, report_speed=True): self.global_step_count += 1 if self.start_time > 0: - get_accelerator().synchronize() + if self.config.synchronized: + get_accelerator().synchronize() self.end_time = time.time() duration = self.end_time - self.start_time self.total_elapsed_time += duration diff --git a/op_builder/cpu_adagrad.py b/op_builder/cpu_adagrad.py index d3f163f7464a..c05f71488950 100644 --- a/op_builder/cpu_adagrad.py +++ b/op_builder/cpu_adagrad.py @@ -3,7 +3,6 @@ # DeepSpeed Team -import os from .builder import TorchCPUOpBuilder @@ -18,26 +17,11 @@ def absolute_name(self): return f'deepspeed.ops.adagrad.{self.NAME}_op' def sources(self): - if self.build_for_cpu: - return ['csrc/adagrad/cpu_adagrad.cpp'] - - return ['csrc/adagrad/cpu_adagrad.cpp', 'csrc/common/custom_cuda_kernel.cu'] + return ['csrc/adagrad/cpu_adagrad.cpp'] def libraries_args(self): args = super().libraries_args() - if self.build_for_cpu: - return args - - if not self.is_rocm_pytorch(): - args += ['curand'] return args def include_paths(self): - import torch - if self.build_for_cpu: - CUDA_INCLUDE = [] - elif not self.is_rocm_pytorch(): - 
CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")] - else: - CUDA_INCLUDE = [] - return ['csrc/includes'] + CUDA_INCLUDE + return ['csrc/includes'] diff --git a/op_builder/cpu_adam.py b/op_builder/cpu_adam.py index 7c34c4ce43a1..7f4c0847a8c4 100644 --- a/op_builder/cpu_adam.py +++ b/op_builder/cpu_adam.py @@ -3,7 +3,6 @@ # DeepSpeed Team -import os from .builder import TorchCPUOpBuilder @@ -18,27 +17,11 @@ def absolute_name(self): return f'deepspeed.ops.adam.{self.NAME}_op' def sources(self): - if self.build_for_cpu: - return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp'] - - return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp', 'csrc/common/custom_cuda_kernel.cu'] + return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp'] def libraries_args(self): args = super().libraries_args() - if self.build_for_cpu: - return args - - if not self.is_rocm_pytorch(): - args += ['curand'] - return args def include_paths(self): - import torch - if self.build_for_cpu: - CUDA_INCLUDE = [] - elif not self.is_rocm_pytorch(): - CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")] - else: - CUDA_INCLUDE = [] - return ['csrc/includes'] + CUDA_INCLUDE + return ['csrc/includes'] diff --git a/op_builder/cpu_lion.py b/op_builder/cpu_lion.py index 5c16d10ebb44..9a60d99773b3 100644 --- a/op_builder/cpu_lion.py +++ b/op_builder/cpu_lion.py @@ -3,7 +3,6 @@ # DeepSpeed Team -import os from .builder import TorchCPUOpBuilder @@ -18,31 +17,11 @@ def absolute_name(self): return f'deepspeed.ops.lion.{self.NAME}_op' def sources(self): - if self.build_for_cpu: - return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp'] - - return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp', 'csrc/common/custom_cuda_kernel.cu'] + return ['csrc/lion/cpu_lion.cpp', 'csrc/lion/cpu_lion_impl.cpp'] def libraries_args(self): args = super().libraries_args() - if self.build_for_cpu: - return args - - if not self.is_rocm_pytorch(): - args += ['curand'] - return args def include_paths(self): - import torch - if self.build_for_cpu: - CUDA_INCLUDE = [] - elif not self.is_rocm_pytorch(): - CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")] - else: - CUDA_INCLUDE = [ - os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include"), - os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "rocrand"), - os.path.join(torch.utils.cpp_extension.ROCM_HOME, "include", "hiprand"), - ] - return ['csrc/includes'] + CUDA_INCLUDE + return ['csrc/includes'] diff --git a/op_builder/hpu/__init__.py b/op_builder/hpu/__init__.py index 6527ace087b5..5ad1b9a7f891 100644 --- a/op_builder/hpu/__init__.py +++ b/op_builder/hpu/__init__.py @@ -7,4 +7,5 @@ from .cpu_adam import CPUAdamBuilder from .fused_adam import FusedAdamBuilder +from .transformer_inference import InferenceBuilder from .no_impl import NotImplementedBuilder diff --git a/op_builder/hpu/cpu_adam.py b/op_builder/hpu/cpu_adam.py index 2f3b7aefe705..58eea2698ebb 100644 --- a/op_builder/hpu/cpu_adam.py +++ b/op_builder/hpu/cpu_adam.py @@ -20,11 +20,6 @@ def absolute_name(self): def sources(self): return ['csrc/adam/cpu_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp'] - def cxx_args(self): - args = super().cxx_args() - args += ['-DENABLE_BFLOAT16'] - return args - def libraries_args(self): args = super().libraries_args() return args diff --git a/op_builder/hpu/fused_adam.py b/op_builder/hpu/fused_adam.py index d77228317ddb..8caff5098b35 100644 --- a/op_builder/hpu/fused_adam.py +++ 
b/op_builder/hpu/fused_adam.py @@ -4,10 +4,85 @@ # DeepSpeed Team -from .builder import CPUOpBuilder +try: + # is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 # type: ignore + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder +import torch +import math -class FusedAdamBuilder(CPUOpBuilder): + +class HPUFusedAdam: + htcore = None + is_lazy_mode = None + + @staticmethod + def multi_tensor_adam(chunk_size, noop_flag_buffer, tensor_lists, lr, beta1, beta2, epsilon, step, adam_w_mode, + bias_correction, weight_decay, *args): + + if HPUFusedAdam.htcore is None: + from habana_frameworks.torch import core as htcore + from habana_frameworks.torch.utils.internal import is_lazy + HPUFusedAdam.htcore = htcore + HPUFusedAdam.is_lazy_mode = is_lazy() + + htcore = HPUFusedAdam.htcore + + htcore.step_closure._mark_step_if_lazy() + step_size = lr + if bias_correction: + bias_correction1 = 1.0 - pow(beta1, step) + bias_correction2 = 1.0 - pow(beta2, step) + step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 + + neg_step = -step_size + neg_step_t = (torch.tensor([neg_step], dtype=torch.float, + requires_grad=False).to(tensor_lists[1][0].dtype).to(tensor_lists[1][0].device, + non_blocking=True)) + + weight_decay = weight_decay if adam_w_mode else 0 + + # since lr is fed into the kernel as a tensor, perform the scalar multiplication of wd here + # NOTE: TODO if lr is updated every step, then we need to convert it to a tensor and + # perform weight decay unconditionally. + modified_wd = 1.0 - weight_decay * lr + + if HPUFusedAdam.is_lazy_mode: + torch.ops.hpu.optimizer_adamw( + tensor_lists[0], + tensor_lists[1], + tensor_lists[2], + tensor_lists[3], + neg_step_t, + beta1, + beta2, + epsilon, + modified_wd, + ) + else: + modified_wd_t = (torch.tensor([modified_wd], dtype=torch.float, requires_grad=False).to( + tensor_lists[1][0].dtype).to(tensor_lists[1][0].device, non_blocking=True)) + torch.ops.hpu.optimizer_adamw( + tensor_lists[0], + tensor_lists[1], + tensor_lists[2], + tensor_lists[3], + neg_step_t, + beta1, + beta2, + epsilon, + modified_wd_t, + modified_wd != 1.0, + ) + + htcore.step_closure._mark_step_if_lazy() + + +class FusedAdamBuilder(OpBuilder): BUILD_VAR = "DS_BUILD_FUSED_ADAM" NAME = "fused_adam" @@ -18,12 +93,10 @@ def absolute_name(self): return f'deepspeed.ops.adam.{self.NAME}_op' def sources(self): - return ['csrc/cpu/adam/fused_adam.cpp', 'csrc/adam/cpu_adam_impl.cpp'] - - def cxx_args(self): - args = super().cxx_args() - args += ['-DENABLE_BFLOAT16'] - return args + return [] def include_paths(self): - return ['csrc/includes'] + return [] + + def load(self, verbose=True): + return HPUFusedAdam diff --git a/op_builder/hpu/transformer_inference.py b/op_builder/hpu/transformer_inference.py new file mode 100644 index 000000000000..e397c99200ec --- /dev/null +++ b/op_builder/hpu/transformer_inference.py @@ -0,0 +1,39 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 +import importlib + +# DeepSpeed Team + +try: + # is op_builder from deepspeed or a 3p version? 
this should only succeed if it's deepspeed + # if successful this also means we're doing a local install and not JIT compile path + from op_builder import __deepspeed__ # noqa: F401 + from op_builder.builder import OpBuilder +except ImportError: + from deepspeed.ops.op_builder.builder import OpBuilder + + +class InferenceBuilder(OpBuilder): + BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE" + NAME = "transformer_inference" + + def __init__(self, name=None): + name = self.NAME if name is None else name + super().__init__(name=self.NAME) + + def absolute_name(self): + return f"deepspeed.ops.transformer.inference.{self.NAME}_op" + + def sources(self): + return [] + + def load(self, verbose=True): + if self.name in __class__._loaded_ops: + return __class__._loaded_ops[self.name] + + from deepspeed.git_version_info import installed_ops # noqa: F401 + if installed_ops.get(self.name, False): + op_module = importlib.import_module(self.absolute_name()) + __class__._loaded_ops[self.name] = op_module + return op_module diff --git a/pre-commit-toggle.sh b/pre-commit-toggle.sh new file mode 100755 index 000000000000..c458c7f2d0da --- /dev/null +++ b/pre-commit-toggle.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Path to the pre-commit configuration file +PRE_COMMIT_CONFIG=".pre-commit-config.yaml" + +# Install pre-commit +pip install -r ${DEEPSPEED_FORK_ROOT}/requirements/requirements-dev.txt + +# Enable pre-commit +function enable_pre_commit() { + # Install pre-commit hooks + pre-commit install +} + +# Disable pre-commit +function disable_pre_commit() { + # Uninstall pre-commit hooks + pre-commit uninstall +} + +# Check if the pre-commit configuration file exists +if [ -f "$PRE_COMMIT_CONFIG" ]; then + echo "Pre-commit configuration file found: $PRE_COMMIT_CONFIG" +else + echo "Pre-commit configuration file not found: $PRE_COMMIT_CONFIG" + exit 1 +fi + +# Check the command-line argument to enable or disable pre-commit +if [ "$1" == "enable" ]; then + enable_pre_commit +elif [ "$1" == "disable" ]; then + disable_pre_commit +else + echo "Usage: ./pre-commit-toggle.sh [enable|disable]" + exit 1 +fi diff --git a/requirements/requirements-sparse_attn.txt b/requirements/requirements-sparse_attn.txt index f929bb0168a5..09386fdcb120 100755 --- a/requirements/requirements-sparse_attn.txt +++ b/requirements/requirements-sparse_attn.txt @@ -1 +1 @@ -triton==1.0.0 +triton==2.0.0.dev20221202 diff --git a/test b/test new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/conftest.py b/tests/conftest.py index 45e8434a021b..cb278f7f0095 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,14 @@ from os.path import abspath, dirname, join import torch import warnings +from unit.ci_promote_marker import * +from unit.xfail_marker import * +from unit.skip_marker import * +from unit.compile_marker import * +from unit.a100_marker import * +from unit.util import get_hpu_dev_version +from deepspeed.accelerator import get_accelerator +from unit.util import hpu_lazy_enabled # Set this environment variable for the T5 inference unittest(s) (e.g. 
google/t5-v1_1-small) os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' @@ -70,6 +78,86 @@ def pytest_runtest_call(item): item.runtest = lambda: True # Dummy function so test is not run twice +def pytest_collection_modifyitems(items, config): + device = get_accelerator().device_name() + gaudi_dev = get_hpu_dev_version() + hpu_lazy_mode = hpu_lazy_enabled() + # Add compile, CI and Promote markers + for item in items: + if item._nodeid in compile_tests: + item._pyfuncitem.add_marker(pytest.mark.compile) + if device != 'hpu': + if item._nodeid in a100_tests: + item._pyfuncitem.add_marker(pytest.mark.a100) + if item._nodeid in hpu_ci_tests: + item._pyfuncitem.add_marker(pytest.mark.hpu_ci) + if item._nodeid in hpu_ci_tests_4cards: + item._pyfuncitem.add_marker(pytest.mark.hpu_ci_4cards) + if item._nodeid in gpu_ci_tests: + item._pyfuncitem.add_marker(pytest.mark.gpu_ci) + if item._nodeid in hpu_promote_tests: + item._pyfuncitem.add_marker(pytest.mark.hpu_promote) + if item._nodeid in hpu_promote_tests_4cards: + item._pyfuncitem.add_marker(pytest.mark.hpu_promote_4cards) + if item._nodeid in gpu_promote_tests: + item._pyfuncitem.add_marker(pytest.mark.gpu_promote) + + # Add xfail and SKIP marker + item.user_properties.append(("module_name", item.module.__name__)) + if device == 'hpu': + # Lazy Run + if hpu_lazy_mode: + if item._nodeid in hpu_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=hpu_lazy_xfail_tests[item._nodeid])) + if item._nodeid in hpu_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=hpu_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi": + if item._nodeid in g1_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g1_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g1_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g1_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi2": + if item._nodeid in g2_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g2_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g2_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g2_lazy_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi3": + if item._nodeid in g3_lazy_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g3_lazy_xfail_tests[item._nodeid])) + if item._nodeid in g3_lazy_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g3_lazy_skip_tests[item._nodeid])) + # Eager Run + else: + if item._nodeid in hpu_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=hpu_eager_xfail_tests[item._nodeid])) + if item._nodeid in hpu_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=hpu_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi": + if item._nodeid in g1_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g1_eager_xfail_tests[item._nodeid])) + if item._nodeid in g1_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g1_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi2": + if item._nodeid in g2_eager_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g2_eager_xfail_tests[item._nodeid])) + if item._nodeid in g2_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g2_eager_skip_tests[item._nodeid])) + if gaudi_dev == "Gaudi3": + if item._nodeid in g3_eager_xfail_tests.keys(): + 
item._pyfuncitem.add_marker(pytest.mark.xfail(reason=g3_eager_xfail_tests[item._nodeid])) + if item._nodeid in g3_eager_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skip(reason=g3_eager_skip_tests[item._nodeid])) + else: + if item._nodeid in gpu_xfail_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.xfail(reason=gpu_xfail_tests[item._nodeid])) + if item._nodeid in gpu_skip_tests.keys(): + item._pyfuncitem.add_marker(pytest.mark.skipif(reason=gpu_skip_tests[item._nodeid])) + for marker in item.own_markers: + if marker.name in ['skip', 'xfail']: + if 'reason' in marker.kwargs: + item.user_properties.append(("message", marker.kwargs['reason'])) + + # We allow DistributedTest to reuse distributed environments. When the last # test for a class is run, we want to make sure those distributed environments # are destroyed. @@ -85,3 +173,11 @@ def pytest_fixture_setup(fixturedef, request): if getattr(fixturedef.func, "is_dist_fixture", False): dist_fixture_class = fixturedef.func() dist_fixture_class(request) + + +def pytest_runtest_makereport(item, call): + if call.when == 'call': + if call.excinfo: + if not (any('message' in prop for prop in item.user_properties)): + if call.excinfo.value: + item.user_properties.append(("message", call.excinfo.value)) diff --git a/tests/perf/adam_test1.py b/tests/perf/adam_test1.py index b35477afb4fe..bde1d53e5179 100755 --- a/tests/perf/adam_test1.py +++ b/tests/perf/adam_test1.py @@ -6,12 +6,10 @@ import torch from deepspeed.ops.adam import DeepSpeedCPUAdam import time -from deepspeed.accelerator import get_accelerator device = 'cpu' model_size = 1 * 1024**3 param = torch.nn.Parameter(torch.ones(model_size, device=device)) -param_fp16 = torch.nn.Parameter(torch.ones(model_size, dtype=torch.half, device=get_accelerator().device_name(0))) optimizer = DeepSpeedCPUAdam([param]) #torch.set_num_threads(128) @@ -19,7 +17,7 @@ avg = 0 for i in range(100): start = time.time() - optimizer.step(fp16_param_groups=[param_fp16]) + optimizer.step() stop = time.time() avg += (stop - start) param.grad = torch.ones(model_size, device=device) * 2 diff --git a/tests/pytest.ini b/tests/pytest.ini index f841c47afc0c..72bec1e2f293 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -m "not sequential and not nightly and not inference and not seq_inference and not inference_ops and not inference_v2 and not inference_v2_ops and not stable_diffusion and not evaluation" +addopts = -m "not sequential and not nightly and not inference and not seq_inference and not inference_ops and not inference_v2 and not inference_v2_ops and not stable_diffusion and not evaluation and not compile and not a100" markers = sequential:Tests that need to be run sequentially inference:Inference model tests @@ -11,3 +11,11 @@ markers = world_size:Change world size of individual tests in a class stable_diffusion:Tests that run Stable Diffusion evaluation:Tests that evaluate model correctness + compile: torch.compile tests + hpu_ci: hpu CI tests + hpu_ci_4cards: HPU CI with 4cards + hpu_promote: HPU Promote tests + hpu_promote_4cards: HPU Promote with 4cards + gpu_ci: GPU CI tests + gpu_promote: GPU Promote tests + a100: Run Unsupported titan-xp tests on a100 diff --git a/tests/unit/a100_marker.py b/tests/unit/a100_marker.py new file mode 100644 index 000000000000..0431f8987d06 --- /dev/null +++ b/tests/unit/a100_marker.py @@ -0,0 +1,84 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +a100_tests = [ + "unit/runtime/half_precision/test_bf16.py::TestZero2ReduceScatterOff::test", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyPartition::test", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[bfp16-bfp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[bfp16-fp32]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[fp16-bfp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[bfp16-fp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-bfp16]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/runtime/half_precision/test_bf16.py::TestAdamBF16ZeroOneCycleCompatibility::test", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype0]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-zero3]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-amp]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-None]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero3]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero3]", + 
"unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-zero2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype0]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_bf16_fragments[False]", + "unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py::TestSparseAdam::test", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config0]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config2]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config0]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config1]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-False]", + "unit/runtime/zero/test_nvme_checkpointing.py::TestNVMeCheckpointing::test_nvme_checkpointing[nvme-nvme]", + "unit/runtime/zero/test_nvme_checkpointing.py::TestNVMeCheckpointing::test_nvme_checkpointing[cpu-nvme]", + "unit/runtime/zero/test_nvme_checkpointing.py::TestNVMeCheckpointing::test_nvme_checkpointing[cpu-cpu]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-full-dtype0]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-local-dtype1]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-local-dtype2]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-full-dtype2]", + 
"unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-full-dtype1]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[nvme-3-local-dtype0]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-local-False]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-local-True]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-full-False]", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[nvme-3-full-True]", +] diff --git a/tests/unit/alexnet_model.py b/tests/unit/alexnet_model.py index cf533063d6ec..ba73ded571c0 100644 --- a/tests/unit/alexnet_model.py +++ b/tests/unit/alexnet_model.py @@ -100,12 +100,24 @@ def cifar_trainset(fp16=False): dist.barrier() if local_rank != 0: dist.barrier() - - data_root = os.getenv("TEST_DATA_DIR", "/tmp/") - trainset = torchvision.datasets.CIFAR10(root=os.path.join(data_root, "cifar10-data"), - train=True, - download=True, - transform=transform) + if os.getenv("CIFAR10_OFFLINE", default=None): + if os.getenv("CIFAR10_DATASET_PATH", default=None): + trainset = torchvision.datasets.CIFAR10(root=os.getenv("CIFAR10_DATASET_PATH", default=None), + train=True, + download=False, + transform=transform) + elif os.getenv("STORE_CIFAR10", default=None): + if os.getenv("CIFAR10_DATASET_PATH", default=None): + trainset = torchvision.datasets.CIFAR10(root=os.getenv("CIFAR10_DATASET_PATH", default=None), + train=True, + download=True, + transform=transform) + else: + data_root = os.getenv("TEST_DATA_DIR", "/tmp/") + trainset = torchvision.datasets.CIFAR10(root=os.path.join(data_root, "cifar10-data"), + train=True, + download=True, + transform=transform) if local_rank == 0: dist.barrier() return trainset diff --git a/tests/unit/checkpoint/common.py b/tests/unit/checkpoint/common.py index 7442e51bad5d..907e14292f0f 100644 --- a/tests/unit/checkpoint/common.py +++ b/tests/unit/checkpoint/common.py @@ -14,6 +14,7 @@ from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus +from unit.util import hpu_lazy_enabled from unit.simple_model import * from unittest.mock import MagicMock, patch @@ -85,15 +86,20 @@ def compare_model_states(saved_model, loaded_model, compare_optimizer=True, load def compare_state_dicts(state0, state1, expected_mismatch_keys=[]): - for (k0, s0), (k1, s1) in zip(state0.items(), state1.items()): - assert k0 == k1, f'failure due to key mismatch {k0} != {k1}' - if k0 in expected_mismatch_keys: + key_set0 = set(k for k in state0.keys() if k not in expected_mismatch_keys) + key_set1 = set(k for k in state1.keys() if k not in expected_mismatch_keys) + assert key_set0 == key_set1, f'failure due to key mismatch {key_set0} != {key_set1}' + + for k in key_set0: + s0 = state0[k] + s1 = state1[k] + if k in expected_mismatch_keys: continue if isinstance(s0, torch.Tensor) and isinstance(s1, torch.Tensor): assert id(s0) != id(s1), f'Comparing optimizer state tensor against itself: {id(s0)} <====> {id(s1)}' assert torch.equal(s0.to('cpu'), s1.to('cpu')) else: - assert s0 == s1, f'failures with keys = {k0}, {k1}, values = {type(s0[0])} and {type(s1[0])}' + assert s0 == s1, f'failures with keys = {k}, {k}, values = {s0} and {s1}' def compare_opt_state_dicts(state0, state1, expected_mismatch_keys=[]): @@ 
-149,6 +155,8 @@ def create_moe_param_groups(model): def create_deepspeed_model(config_dict, model, base_optimizer): + if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) ds_model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=create_moe_param_groups(model), @@ -168,10 +176,14 @@ def checkpoint_correctness_verification(config_dict, base_optimizers=[None, None], empty_tag=False, seq_dataloader=False, - load_module_only=False): + load_module_only=False, + compile_mode=False): dtype = torch.half if fp16 else torch.float32 ds_model = create_deepspeed_model(config_dict=config_dict, model=models[0], base_optimizer=base_optimizers[0]) + if compile_mode: + ds_model.compile() + if seq_dataloader: data_loader = sequence_dataloader(model=ds_model, total_samples=50, diff --git a/tests/unit/checkpoint/test_latest_checkpoint.py b/tests/unit/checkpoint/test_latest_checkpoint.py index 41ce2278680f..eeeac5471c54 100644 --- a/tests/unit/checkpoint/test_latest_checkpoint.py +++ b/tests/unit/checkpoint/test_latest_checkpoint.py @@ -19,7 +19,8 @@ class TestLatestCheckpoint(DistributedTest): world_size = 1 - def test_existing_latest(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_existing_latest(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -28,7 +29,7 @@ def test_existing_latest(self, tmpdir): "params": { "lr": 0.00015 } - } + }, } hidden_dim = 10 models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)] @@ -39,9 +40,11 @@ def test_existing_latest(self, tmpdir): load_optimizer_states=True, load_lr_scheduler_states=False, fp16=False, - empty_tag=True) + empty_tag=True, + compile_mode=compile_mode) - def test_missing_latest(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_missing_latest(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -50,10 +53,12 @@ def test_missing_latest(self, tmpdir): "params": { "lr": 0.00015 } - } + }, } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() # should be no-op, since latest doesn't exist model.load_checkpoint(tmpdir) diff --git a/tests/unit/checkpoint/test_lr_scheduler.py b/tests/unit/checkpoint/test_lr_scheduler.py index c4c6773cd474..81c0128c17bc 100644 --- a/tests/unit/checkpoint/test_lr_scheduler.py +++ b/tests/unit/checkpoint/test_lr_scheduler.py @@ -8,18 +8,17 @@ from unit.common import DistributedTest from unit.simple_model import * - from unit.checkpoint.common import checkpoint_correctness_verification - import pytest +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload', [(0, False), (1, False), (2, False), (2, True), (3, False), (3, True)]) class TestLRSchedulerCheckpoint(DistributedTest): world_size = 2 - def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): + def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -49,14 +48,16 @@ def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): "warmup_max_lr": 0.001, "warmup_num_steps": 1000 } - } + }, } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None if zero_stage == 3: global 
DeepSpeedZeroOptimizer_Stage3 from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] @@ -66,9 +67,11 @@ def test_checkpoint_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): hidden_dim, tmpdir, load_optimizer_states=False, - load_lr_scheduler_states=True) + load_lr_scheduler_states=True, + fp16=fp16, + compile_mode=compile_mode) - def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): + def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -99,8 +102,11 @@ def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None + if zero_stage == 3: - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] @@ -110,4 +116,6 @@ def test_checkpoint_no_lr_scheduler(self, tmpdir, zero_stage, use_cpu_offload): hidden_dim, tmpdir, load_optimizer_states=False, - load_lr_scheduler_states=False) + load_lr_scheduler_states=False, + fp16=fp16, + compile_mode=compile_mode) diff --git a/tests/unit/checkpoint/test_mics_optimizer.py b/tests/unit/checkpoint/test_mics_optimizer.py index 3f853cd5c13a..c7758fe07520 100644 --- a/tests/unit/checkpoint/test_mics_optimizer.py +++ b/tests/unit/checkpoint/test_mics_optimizer.py @@ -12,7 +12,6 @@ from unit.common import DistributedTest from unit.simple_model import * from unit.checkpoint.common import * - import pytest if not required_torch_version(max_version=2.0): @@ -46,7 +45,6 @@ def _toy_model_config(self, shard_size): "mics_shard_size": shard_size } } - hidden_dim = 10 with deepspeed.zero.MiCS_Init(config_dict_or_path=config_dict): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] diff --git a/tests/unit/checkpoint/test_moe_checkpoint.py b/tests/unit/checkpoint/test_moe_checkpoint.py index 0706b7327ce8..d0942098491b 100644 --- a/tests/unit/checkpoint/test_moe_checkpoint.py +++ b/tests/unit/checkpoint/test_moe_checkpoint.py @@ -8,8 +8,9 @@ from unit.common import DistributedTest from unit.simple_model import * - +from deepspeed.accelerator import get_accelerator from unit.checkpoint.common import checkpoint_correctness_verification +from unit.util import hpu_lazy_enabled import pytest @@ -24,7 +25,7 @@ def test_checkpoint_moe(self, tmpdir, ep_size): config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 16 - + fp16 = config_dict["fp16"]["enabled"] models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)] optimizers = [torch.optim.AdamW(params=model.parameters()) for model in models] checkpoint_correctness_verification(config_dict, @@ -33,13 +34,14 @@ def test_checkpoint_moe(self, tmpdir, ep_size): tmpdir=tmpdir, load_optimizer_states=True, load_lr_scheduler_states=False, - fp16=config_dict["fp16"]["enabled"], + fp16=fp16, empty_tag=True, base_optimizers=optimizers, seq_dataloader=True) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize("ep_size, 
load_optim_states", [(4, True), (4, False), (2, True), (2, False)]) - def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): + def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states, compile_mode): if not required_torch_version(min_version=1.8): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") @@ -61,11 +63,16 @@ def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): }, "zero_optimization": { "stage": 2, - } + "reduce_scatter": True + }, } hidden_dim = 16 + fp16 = config_dict["fp16"]["enabled"] models = [SimpleMoEModel(hidden_dim=hidden_dim, num_experts=ep_size, ep_size=ep_size) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] # param group must have a random unique name (for now) # TODO: clean-up this requirement, the unique name should not be required here param_groups = [{'params': [p for p in model.parameters()], 'name': 'random-unique-name'} for model in models] @@ -77,7 +84,8 @@ def test_checkpoint_moe_and_zero(self, tmpdir, ep_size, load_optim_states): tmpdir=tmpdir, load_optimizer_states=load_optim_states, load_lr_scheduler_states=False, - fp16=config_dict["fp16"]["enabled"], + fp16=fp16, empty_tag=True, base_optimizers=optimizers, - seq_dataloader=True) + seq_dataloader=True, + compile_mode=compile_mode) diff --git a/tests/unit/checkpoint/test_other_optimizer.py b/tests/unit/checkpoint/test_other_optimizer.py index 9cb8c4286880..9d3ca744f111 100644 --- a/tests/unit/checkpoint/test_other_optimizer.py +++ b/tests/unit/checkpoint/test_other_optimizer.py @@ -8,9 +8,7 @@ from unit.common import DistributedTest from unit.simple_model import * - from unit.checkpoint.common import checkpoint_correctness_verification - import pytest @@ -18,7 +16,8 @@ class TestOtherOptimizerCheckpoint(DistributedTest): world_size = 2 @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") - def test_checkpoint_unfused_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_unfused_optimizer(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -47,9 +46,9 @@ def test_checkpoint_unfused_optimizer(self, tmpdir): "cycle_max_mom": 0.99, "decay_mom_rate": 0.0 } - } + }, } - + fp16 = True args = args_from_dict(tmpdir, config_dict) hidden_dim = 10 models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] @@ -59,16 +58,21 @@ def test_checkpoint_unfused_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + fp16=fp16, + compile_mode=compile_mode) # Ignore optimizer states checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=False) + load_optimizer_states=False, + fp16=fp16, + compile_mode=compile_mode) - def test_checkpoint_fused_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_fused_optimizer(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -83,8 +87,9 @@ def test_checkpoint_fused_optimizer(self, tmpdir): }, "fp16": { "enabled": True - } + }, } + fp16 = True args = args_from_dict(tmpdir, config_dict) hidden_dim = 10 @@ -95,16 +100,21 @@ def test_checkpoint_fused_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, 
tmpdir=tmpdir, - load_optimizer_states=True) + load_optimizer_states=True, + fp16=fp16, + compile_mode=compile_mode) # Ignore optimizer states checkpoint_correctness_verification(config_dict, models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=False) + load_optimizer_states=False, + fp16=fp16, + compile_mode=compile_mode) - def test_checkpoint_fp32_optimizer(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_fp32_optimizer(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -119,7 +129,7 @@ def test_checkpoint_fp32_optimizer(self, tmpdir): }, "fp16": { "enabled": False - } + }, } args = args_from_dict(tmpdir, config_dict) @@ -129,4 +139,5 @@ def test_checkpoint_fp32_optimizer(self, tmpdir): models=models, hidden_dim=hidden_dim, tmpdir=tmpdir, - fp16=False) + fp16=False, + compile_mode=compile_mode) diff --git a/tests/unit/checkpoint/test_pipeline.py b/tests/unit/checkpoint/test_pipeline.py index 99f1ba2ec433..259e83dcdb01 100644 --- a/tests/unit/checkpoint/test_pipeline.py +++ b/tests/unit/checkpoint/test_pipeline.py @@ -8,15 +8,15 @@ from unit.simple_model import * from unit.checkpoint.common import checkpoint_correctness_verification from unit.util import skip_on_arch - import pytest class TestPipelineCheckpoint(DistributedTest): world_size = 4 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize("zero_stage", [0, 1]) - def test_checkpoint_pipe_engine(self, zero_stage, tmpdir): + def test_checkpoint_pipe_engine(self, zero_stage, compile_mode, tmpdir): skip_on_arch(min_arch=7) config_dict = { @@ -50,18 +50,20 @@ def test_checkpoint_pipe_engine(self, zero_stage, tmpdir): "cycle_max_mom": 0.99, "decay_mom_rate": 0.0 } - } + }, } + fp16 = config_dict['fp16']['enabled'] models = [LinearStackPipe(num_stages=2) for _ in range(2)] checkpoint_correctness_verification(config_dict=config_dict, models=models, hidden_dim=models[0].hidden_dim, tmpdir=tmpdir, - fp16=config_dict['fp16']['enabled'], + fp16=fp16, load_optimizer_states=True, load_lr_scheduler_states=True, - train_batch=True) + train_batch=True, + compile_mode=compile_mode) @pytest.mark.parametrize( "base_topo,test_topo", diff --git a/tests/unit/checkpoint/test_shared_weights.py b/tests/unit/checkpoint/test_shared_weights.py index ed69073fb81c..04e7c5d59a1a 100644 --- a/tests/unit/checkpoint/test_shared_weights.py +++ b/tests/unit/checkpoint/test_shared_weights.py @@ -7,8 +7,11 @@ import torch.nn as nn import deepspeed +import pytest from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint from unit.common import DistributedTest +from unit.util import hpu_lazy_enabled +from deepspeed.accelerator import get_accelerator class ModelWithSharedWeights(nn.Module): @@ -25,7 +28,8 @@ def __init__(self): class TestCheckpointSharedWeights(DistributedTest): world_size = 2 - def test_checkpoint_shared_weights(self, tmp_path): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_shared_weights(self, tmp_path, compile_mode): config = { "train_micro_batch_size_per_gpu": 2, "zero_allow_untested_optimizer": True, @@ -34,13 +38,18 @@ def test_checkpoint_shared_weights(self, tmp_path): }, } model = ModelWithSharedWeights() + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) - deepspeed_engine, _, _, _ = deepspeed.initialize( config=config, model=model, optimizer=optimizer, ) + if 
compile_mode: + deepspeed_engine.compile() + filename = tmp_path / "checkpoint.pt" deepspeed_engine.save_checkpoint(filename, tag="checkpoint") diff --git a/tests/unit/checkpoint/test_sparse.py b/tests/unit/checkpoint/test_sparse.py index 19fbcd81e473..e2f0e1dc079b 100644 --- a/tests/unit/checkpoint/test_sparse.py +++ b/tests/unit/checkpoint/test_sparse.py @@ -24,8 +24,9 @@ class TestSparseCheckpoint(DistributedTest): [True, False], [True, True], ]) + @pytest.mark.parametrize('compile_mode', [True, False]) def test_non_strict_load_sparse(self, tmpdir, to_save_model_has_embedding, to_save_model_sparse, - destination_has_embedding, destination_sparse): + destination_has_embedding, destination_sparse, compile_mode): class ModelNoEmbedding(torch.nn.Module): @@ -66,6 +67,10 @@ def forward(self, x, offsets): "sparse_gradients": destination_sparse }) + if compile_mode: + engine_to_save.compile() + engine_destination.compile() + save_folder = os.path.join(tmpdir, 'saved_checkpoint') save_tag = '1' diff --git a/tests/unit/checkpoint/test_tag_validation.py b/tests/unit/checkpoint/test_tag_validation.py index b164c31e52b0..1fa720000ff1 100644 --- a/tests/unit/checkpoint/test_tag_validation.py +++ b/tests/unit/checkpoint/test_tag_validation.py @@ -7,15 +7,15 @@ from unit.common import DistributedTest from unit.simple_model import * - import pytest class TestCheckpointValidationTag(DistributedTest): world_size = 2 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('valid_mode', ["FAIL", "WARN", "IGNORE"]) - def test_checkpoint_unique_tag(self, tmpdir, valid_mode): + def test_checkpoint_unique_tag(self, tmpdir, valid_mode, compile_mode): config_dict = { "train_batch_size": 2, "steps_per_print": 1, @@ -27,19 +27,22 @@ def test_checkpoint_unique_tag(self, tmpdir, valid_mode): }, "checkpoint": { "tag_validation": valid_mode - } + }, } hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + if compile_mode: + model.compile() if valid_mode == "FAIL": with pytest.raises(AssertionError): model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}") else: model.save_checkpoint(save_dir=tmpdir, tag=f"tag-{dist.get_rank()}") - def test_checkpoint_unknown_tag_validation(self, tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test_checkpoint_unknown_tag_validation(self, tmpdir, compile_mode): config_dict = { "train_batch_size": 2, @@ -52,7 +55,7 @@ def test_checkpoint_unknown_tag_validation(self, tmpdir): }, "checkpoint": { "tag_validation": "foo" - } + }, } hidden_dim = 10 args = args_from_dict(tmpdir, config_dict) @@ -60,3 +63,4 @@ def test_checkpoint_unknown_tag_validation(self, tmpdir): with pytest.raises(deepspeed.DeepSpeedConfigError): model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + model.compile() diff --git a/tests/unit/checkpoint/test_universal_checkpoint.py b/tests/unit/checkpoint/test_universal_checkpoint.py new file mode 100644 index 000000000000..32342019cb62 --- /dev/null +++ b/tests/unit/checkpoint/test_universal_checkpoint.py @@ -0,0 +1,230 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import deepspeed +from types import SimpleNamespace +from torch.utils._pytree import tree_map + +from deepspeed.runtime.utils import required_torch_version +from deepspeed.checkpoint import UNIVERSAL_CHECKPOINT_INFO +from deepspeed.checkpoint.ds_to_universal import main as convert_to_universal + +from unit.common import DistributedTest, DistributedFixture +from unit.simple_model import * +from unit.util import bf16_required_version_check, hpu_lazy_enabled + +from unit.checkpoint.common import compare_opt_state_dicts, compare_state_dicts +from deepspeed.accelerator import get_accelerator + +import pytest +import deepspeed.comm as dist + + +def get_expected_mismatch_keys(): + # torch 1.2.* stores raw tensor id numbers in checkpoint state which leads to + # false positive mismatches in checkpoint state comparisons. + # Newer torch versions store tensor ids as 0, 1, 2, ... + return [] if required_torch_version(min_version=1.4) else ['params'] + + +def maybe_step(t): + return not torch.is_tensor(t) or (t.device.type == 'cpu' and t.numel() == 1) + + +def gather_opt_state(optimizer_state): + + def gather_tensor(t): + + if maybe_step(t): + return t + else: + buffer = [torch.zeros_like(t.flatten()) for _ in range(dist.get_world_size())] + dist.all_gather(buffer, t.flatten()) + return torch.cat(buffer) + + return tree_map(gather_tensor, optimizer_state) + + +def remove_pad_in_opt_state(optimizer_state, num_params): + + def remove_pad(t): + if maybe_step(t): + return t + else: + return t[:num_params] + + return tree_map(remove_pad, optimizer_state) + + +CP_TAG = "test_tag" + + +def init_ds_engine(model, ds_config, use_torch_adam): + + if use_torch_adam: + ds_optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + del ds_config["optimizer"] + model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, optimizer=ds_optimizer) + else: + model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters()) + + return model + + +def train_save_convert(ds_config, hidden_dim, load_optim, use_torch_adam, dtype, tmpdir): + if dtype == torch.bfloat16 and not bf16_required_version_check(): + return + + test_step = 8 + + model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model = model.to(device) + model = init_ds_engine(model, ds_config, use_torch_adam) + data_loader = random_dataloader(model=model, + total_samples=test_step, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) + for batch in data_loader: + loss = model(batch[0], batch[1]) + model.backward(loss) + model.step() + + sd = model.optimizer.optimizer.state_dict() if load_optim else None + + client_state = {} + client_state[UNIVERSAL_CHECKPOINT_INFO] = {} + client_state['iteration'] = test_step + model.save_checkpoint(tmpdir, tag=CP_TAG, client_state=client_state) + + cp_dir = os.path.join(tmpdir, CP_TAG) + univ_cp_dir = f"{cp_dir}_universal" + + args = SimpleNamespace(input_folder=cp_dir, + output_folder=univ_cp_dir, + num_extract_workers=1, + num_merge_workers=1, + keep_temp_folder=False, + strict=True) + + dist.barrier() + if dist.get_rank() == 0: + convert_to_universal(args) + + model_state = model.state_dict() + optimizer_state = None + if load_optim: + optimizer_state = gather_opt_state(model.optimizer.optimizer.state_dict()) + + if dist.get_rank() == 0: + torch.save((model_state, optimizer_state), os.path.join(tmpdir, "baseline_state.pt")) + + dist.barrier() + + return model, 
sd + + +@pytest.fixture +def ds_config(zero_stage, dtype): + ds_config = { + "train_batch_size": 8, + "optimizer": { + "type": 'Adam' + }, + "zero_optimization": { + "stage": zero_stage, + } + } + if dtype == torch.float16: + ds_config["fp16"] = {"enabled": True, "initial_scale_power": 8} + elif dtype == torch.bfloat16: + ds_config["bf16"] = {"enabled": True} + return ds_config + + +class _baseline(DistributedFixture): + world_size = None + + def run(self, tmpdir, ds_config, zero_stage, dtype, load_optim, use_torch_adam): + hidden_dim = 10 + train_save_convert(ds_config, hidden_dim, load_optim, use_torch_adam, dtype, tmpdir) + + +class baseline_ws2(_baseline): + world_size = 2 + + +class baseline_ws4(_baseline): + world_size = 4 + + +@pytest.mark.parametrize('dtype', [torch.bfloat16, torch.float16, torch.float32]) +@pytest.mark.parametrize("zero_stage", [1]) +@pytest.mark.parametrize("use_torch_adam", [False, True]) +@pytest.mark.parametrize("load_optim", [False, True]) +@pytest.mark.parametrize('compile_mode', [True, False]) +class TestZeROUniversalCheckpointDP(DistributedTest): + + def _run_test(self, tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode): + if dtype == torch.bfloat16 and not bf16_required_version_check(): + pytest.skip( + " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" + ) + + hidden_dim = 10 + loaded_model_state, loaded_optimizer_state = torch.load(f"{tmpdir}/baseline_state.pt") + + ds_config["checkpoint"] = {"load_universal": True} + univ_model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + univ_model = univ_model.to(device) + + univ_model = init_ds_engine(univ_model, ds_config, use_torch_adam) + if compile_mode: + univ_model.compile() + + univ_model.load_checkpoint(tmpdir, tag=f"{CP_TAG}_universal", load_optimizer_states=load_optim) + + model_state = univ_model.state_dict() + compare_state_dicts(model_state, loaded_model_state) + + if load_optim: + optimizer_state = gather_opt_state(univ_model.optimizer.optimizer.state_dict()) + # padding sizes may differ when dp sizes are different + param_count = sum(p.numel() for p in univ_model.parameters()) + optimizer_state = remove_pad_in_opt_state(optimizer_state, param_count) + loaded_optimizer_state = remove_pad_in_opt_state(loaded_optimizer_state, param_count) + + compare_opt_state_dicts(optimizer_state, loaded_optimizer_state, get_expected_mismatch_keys()) + + # Run training again to verify that the optimizer has necessary states + test_step = 8 + data_loader = random_dataloader(model=univ_model, + total_samples=test_step, + hidden_dim=hidden_dim, + device=univ_model.device, + dtype=dtype) + for batch in data_loader: + loss = univ_model(batch[0], batch[1]) + univ_model.backward(loss) + univ_model.step() + + @pytest.mark.world_size(2) + def test_dp_world_size_2to2(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam, + compile_mode): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode) + + @pytest.mark.world_size(2) + def test_dp_world_size_4to2(self, baseline_ws4, tmpdir, dtype, ds_config, load_optim, use_torch_adam, + compile_mode): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam, compile_mode) + + @pytest.mark.world_size(4) + def test_dp_world_size_2to4(self, baseline_ws2, tmpdir, dtype, ds_config, load_optim, use_torch_adam, + compile_mode): + self._run_test(tmpdir, dtype, ds_config, load_optim, use_torch_adam, 
compile_mode) diff --git a/tests/unit/checkpoint/test_zero_optimizer.py b/tests/unit/checkpoint/test_zero_optimizer.py index 0b9efb3ec462..035805541cb0 100644 --- a/tests/unit/checkpoint/test_zero_optimizer.py +++ b/tests/unit/checkpoint/test_zero_optimizer.py @@ -12,6 +12,7 @@ from unit.common import DistributedTest, DistributedFixture from unit.simple_model import * +from unit.util import hpu_lazy_enabled from unit.checkpoint.common import * @@ -21,8 +22,9 @@ class TestZeROCheckpoint(DistributedTest): world_size = 2 + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [3]) - def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage): + def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, "optimizer": { @@ -35,20 +37,25 @@ def test_pipeline_checkpoint_loading(self, tmpdir, zero_stage): "zero_optimization": { "stage": zero_stage, "pipeline_loading_checkpoint": True, - } + }, } hidden_dim = 10 with deepspeed.zero.Init(): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + load_module_only=True, + compile_mode=compile_mode) - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) - + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, 'Adam'), (2, False, 'Adam'), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) - def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): + def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -72,23 +79,35 @@ def test_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_op "zero_optimization": { "stage": zero_stage, "cpu_offload": use_cpu_offload - } + }, } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None if zero_stage == 3: - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True) - + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + fp16=fp16, + load_optimizer_states=True, + compile_mode=compile_mode) + + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage, use_cpu_offload, adam_optimizer', [(1, False, "Adam"), (2, False, "Adam"), (2, True, 'deepspeed_adam'), (3, False, 'Adam'), (3, True, 'deepspeed_adam')]) - def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer): + def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, adam_optimizer, compile_mode): if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: pytest.skip("cpu-adam is not compatible") @@ -110,22 +129,34 @@ def test_not_load_optimizer_state(self, tmpdir, zero_stage, use_cpu_offload, ada "zero_optimization": { "stage": zero_stage, "cpu_offload": 
use_cpu_offload - } + }, } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None if zero_stage == 3: global DeepSpeedZeroOptimizer_Stage3 from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False) - + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + fp16=fp16, + load_optimizer_states=False, + compile_mode=compile_mode) + + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [1, 2]) - def test_hybrid_optimizer_state(self, tmpdir, zero_stage): + def test_hybrid_optimizer_state(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_micro_batch_size_per_gpu": 2, "gradient_accumulation_steps": 2, @@ -137,10 +168,15 @@ def test_hybrid_optimizer_state(self, tmpdir, zero_stage): "fp16": { "enabled": True, "initial_scale_power": 8 - } + }, } hidden_dim = 10 + fp16 = True + models = [SimpleModel(hidden_dim=hidden_dim) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] optimizers = [HybridStateOptimizer(model.parameters()) for model in models] checkpoint_correctness_verification(config_dict, @@ -148,10 +184,13 @@ def test_hybrid_optimizer_state(self, tmpdir, zero_stage): base_optimizers=optimizers, hidden_dim=hidden_dim, tmpdir=tmpdir, - load_optimizer_states=True) + fp16=fp16, + load_optimizer_states=True, + compile_mode=compile_mode) + @pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3]) - def test_load_module_only(self, tmpdir, zero_stage): + def test_load_module_only(self, tmpdir, zero_stage, compile_mode): config_dict = { "train_batch_size": 2, "optimizer": { @@ -163,17 +202,28 @@ def test_load_module_only(self, tmpdir, zero_stage): }, "zero_optimization": { "stage": zero_stage, - } + }, } hidden_dim = 10 + fp16 = True + zero3_init_dtype = None if zero_stage == 3: - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] else: models = [SimpleModel(hidden_dim, empty_grad=False) for _ in range(2)] + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + models = [model.to(device) for model in models] - checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True) + checkpoint_correctness_verification(config_dict, + models, + hidden_dim, + tmpdir, + fp16=fp16, + load_module_only=True, + compile_mode=compile_mode) class ws4_model_checkpoint(DistributedFixture): @@ -197,8 +247,13 @@ def run(self, class_tmpdir, elastic_save, load_optim): hidden_dim = 10 model = SimpleModel(hidden_dim) + dtype = torch.half model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=8, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in 
enumerate(data_loader):
             loss = model(batch[0], batch[1])
             model.backward(loss)
@@ -209,13 +264,14 @@ def run(self, class_tmpdir, elastic_save, load_optim):
             model.save_checkpoint(class_tmpdir)
 
 
+@pytest.mark.parametrize('compile_mode', [True, False])
 @pytest.mark.parametrize("elastic_save", [True, False])
 @pytest.mark.parametrize("elastic_load", [True, False])
 @pytest.mark.parametrize("load_optim", [True, False])
 class TestZeROElasticCheckpoint(DistributedTest):
     world_size = 2
 
-    def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim):
+    def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, load_optim, compile_mode):
         ds_config = {
             "train_batch_size": 2,
             "optimizer": {
@@ -228,7 +284,7 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l
             "zero_optimization": {
                 "stage": 2,
                 "elastic_checkpoint": elastic_save
-            }
+            },
         }
         hidden_dim = 10
 
@@ -237,10 +293,19 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l
         # Newer torch versions store tensor ids as 0, 1, 2, ...
         expected_mismatch_keys = [] if required_torch_version(min_version=1.4) else ['params']
         models = [SimpleModel(hidden_dim) for _ in range(2)]
+
+        dtype = torch.half
         model, _, _, _ = deepspeed.initialize(config=ds_config,
                                               model=models[0],
                                               model_parameters=models[0].parameters())
-        data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device)
+        if compile_mode:
+            model.compile()
+        run_steps = 8
+        data_loader = random_dataloader(model=model,
+                                        total_samples=run_steps,
+                                        hidden_dim=hidden_dim,
+                                        device=model.device,
+                                        dtype=dtype)
         for n, batch in enumerate(data_loader):
             loss = model(batch[0], batch[1])
             model.backward(loss)
@@ -254,6 +319,8 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l
         model, _, _, _ = deepspeed.initialize(config=ds_config,
                                               model=models[1],
                                               model_parameters=models[1].parameters())
+        if compile_mode:
+            model.compile()
         model.load_checkpoint(tmpdir, load_optimizer_states=load_optim)
 
         if load_optim:
@@ -261,14 +328,18 @@ def test_elastic_checkpoint_fixed_dp(self, tmpdir, elastic_save, elastic_load, l
             curr_sd = model.optimizer.optimizer.state_dict()
             compare_opt_state_dicts(curr_sd, saved_sd, expected_mismatch_keys)
 
-        data_loader = random_dataloader(model=model, total_samples=8, hidden_dim=hidden_dim, device=model.device)
+        data_loader = random_dataloader(model=model,
+                                        total_samples=8,
+                                        hidden_dim=hidden_dim,
+                                        device=model.device,
+                                        dtype=dtype)
         for n, batch in enumerate(data_loader):
             loss = model(batch[0], batch[1])
             model.backward(loss)
             model.step()
 
     def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir, elastic_save, elastic_load,
-                                          load_optim):
+                                          load_optim, compile_mode):
         ds_config = {
             "train_batch_size": 4,
             "optimizer": {
@@ -281,13 +352,15 @@ def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir,
             "zero_optimization": {
                 "stage": 2,
                 "elastic_checkpoint": elastic_load
-            }
+            },
         }
         hidden_dim = 10
 
         model = SimpleModel(hidden_dim)
         # Load checkpoint with dp world size = 2
         model, _, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model.parameters())
+        if compile_mode:
+            model.compile()
         if load_optim:
             with pytest.raises(deepspeed.runtime.zero.utils.ZeRORuntimeException):
                 model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim)
@@ -295,11 +368,12 @@ def test_elastic_checkpoint_change_dp(self, ws4_model_checkpoint, class_tmpdir,
             model.load_checkpoint(class_tmpdir, load_optimizer_states=load_optim)
 
 
+@pytest.mark.parametrize('compile_mode', [True, False])
 class TestZeROSaveLoadEdgeCase(DistributedTest):
     world_size = 2
 
     @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3])
-    def test_immediate_save_load(self, tmpdir, zero_stage):
+    def test_immediate_save_load(self, tmpdir, zero_stage, compile_mode):
         config_dict = {
             "train_batch_size": 4,
             "optimizer": {
@@ -311,12 +385,14 @@ def test_immediate_save_load(self, tmpdir, zero_stage):
             },
             "zero_optimization": {
                 "stage": zero_stage,
-            }
+            },
         }
         hidden_dim = 10
         model = SimpleModel(hidden_dim)
         ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None)
+        if compile_mode:
+            ds_model.compile()
         ds_model.save_checkpoint(tmpdir)
         ds_model.load_checkpoint(tmpdir,
                                  load_optimizer_states=False,
@@ -324,7 +400,7 @@ def test_immediate_save_load(self, tmpdir, zero_stage):
                                  load_module_only=False)
 
     @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3])
-    def test_load_immediate_save(self, tmpdir, zero_stage):
+    def test_load_immediate_save(self, tmpdir, zero_stage, compile_mode):
         config_dict = {
             "train_batch_size": 4,
             "optimizer": {
@@ -336,7 +412,7 @@ def test_load_immediate_save(self, tmpdir, zero_stage):
             },
             "zero_optimization": {
                 "stage": zero_stage,
-            }
+            },
         }
         hidden_dim = 10
         model = SimpleModel(hidden_dim)
@@ -344,6 +420,8 @@ def test_load_immediate_save(self, tmpdir, zero_stage):
         # 1. pretrain a model and save it
         dtype = torch.half
         ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None)
+        if compile_mode:
+            ds_model.compile()
         data_loader = random_dataloader(model=ds_model,
                                         total_samples=1,
                                         hidden_dim=hidden_dim,
@@ -359,6 +437,8 @@ def test_load_immediate_save(self, tmpdir, zero_stage):
         # 2. load and immediately save a model with a fresh ds engine
         ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None)
+        if compile_mode:
+            ds_model.compile()
         ds_model.load_checkpoint(tmpdir,
                                  load_optimizer_states=False,
                                  load_lr_scheduler_states=False,
@@ -366,7 +446,7 @@ def test_load_immediate_save(self, tmpdir, zero_stage):
         ds_model.save_checkpoint(tmpdir)
 
     @pytest.mark.parametrize('zero_stage', [0, 1, 2, 3])
-    def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage):
+    def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage, compile_mode):
         config_dict = {
             "optimizer": {
                 "type": 'Adam'
@@ -386,16 +466,19 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage):
         hidden_dim = 10
         model = SimpleModel(hidden_dim)
+        dtype = torch.half
 
         # This test reproduces a bug where one tries to retrieve a 16bit model before grad_accum
         # cycle was completed.
         # So we config grad_accum=2 and step only once and save_16bit_model
         ds_model = create_deepspeed_model(config_dict=config_dict, model=model, base_optimizer=None)
+        if compile_mode:
+            ds_model.compile()
         data_loader = random_dataloader(model=ds_model,
                                         total_samples=2,
                                         hidden_dim=hidden_dim,
                                         device=ds_model.device,
-                                        dtype=torch.half)
+                                        dtype=dtype)
 
         batch = next(iter(data_loader))
         loss = ds_model(batch[0], batch[1])
@@ -411,11 +494,12 @@ def test_save_before_accum_grad_is_done(self, tmpdir, zero_stage):
         ds_model.save_checkpoint(tmpdir)
 
 
+@pytest.mark.parametrize('compile_mode', [True, False])
 class TestZeROCheckpointFrozenWeights(DistributedTest):
     world_size = 2
 
     @pytest.mark.parametrize('zero_stage', [1, 2, 3])
-    def test_load_optimizer_state(self, tmpdir, zero_stage):
+    def test_load_optimizer_state(self, tmpdir, zero_stage, compile_mode):
         config_dict = {
             "train_batch_size": 2,
@@ -436,17 +520,29 @@ def test_load_optimizer_state(self, tmpdir, zero_stage):
             "wall_clock_breakdown": True,
             "zero_optimization": {
                 "stage": zero_stage
-            }
+            },
         }
+        fp16 = True
+        dtype = None
+
         hidden_dim = 10
 
-        with deepspeed.zero.Init(enabled=zero_stage == 3):
+        with deepspeed.zero.Init(enabled=zero_stage == 3, dtype=dtype):
             models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)]
+        if hpu_lazy_enabled():
+            device = get_accelerator().device_name()
+            models = [model.to(device) for model in models]
 
-        checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=True)
+        checkpoint_correctness_verification(config_dict,
+                                            models,
+                                            hidden_dim,
+                                            tmpdir,
+                                            load_optimizer_states=True,
+                                            fp16=fp16,
+                                            compile_mode=compile_mode)
 
     @pytest.mark.parametrize('zero_stage', [1, 2, 3])
-    def test_not_load_optimizer_state(self, tmpdir, zero_stage):
+    def test_not_load_optimizer_state(self, tmpdir, zero_stage, compile_mode):
         config_dict = {
             "train_batch_size": 2,
@@ -465,17 +561,27 @@ def test_not_load_optimizer_state(self, tmpdir, zero_stage):
             },
             "zero_optimization": {
                 "stage": zero_stage
-            }
+            },
         }
         hidden_dim = 10
-
-        with deepspeed.zero.Init(enabled=zero_stage == 3):
+        fp16 = True
+        dtype = None
+        with deepspeed.zero.Init(enabled=zero_stage == 3, dtype=dtype):
            models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)]
+        if hpu_lazy_enabled():
+            device = get_accelerator().device_name()
+            models = [model.to(device) for model in models]
 
-        checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_optimizer_states=False)
+        checkpoint_correctness_verification(config_dict,
+                                            models,
+                                            hidden_dim,
+                                            tmpdir,
+                                            load_optimizer_states=False,
+                                            fp16=fp16,
+                                            compile_mode=compile_mode)
 
     @pytest.mark.parametrize('zero_stage', [1, 2, 3])
-    def test_load_module_only(self, tmpdir, zero_stage):
+    def test_load_module_only(self, tmpdir, zero_stage, compile_mode):
         config_dict = {
             "train_batch_size": 2,
             "optimizer": {
@@ -487,17 +593,27 @@ def test_load_module_only(self, tmpdir, zero_stage):
             },
             "zero_optimization": {
                 "stage": zero_stage,
-            }
+            },
         }
         hidden_dim = 10
-
-        with deepspeed.zero.Init(enabled=zero_stage == 3):
+        fp16 = True
+        dtype = None
+        with deepspeed.zero.Init(enabled=zero_stage == 3, dtype=dtype):
            models = [SimpleFrozenModel(hidden_dim, empty_grad=False) for _ in range(2)]
+        if hpu_lazy_enabled():
+            device = get_accelerator().device_name()
+            models = [model.to(device) for model in models]
 
-        checkpoint_correctness_verification(config_dict, models, hidden_dim, tmpdir, load_module_only=True)
+        checkpoint_correctness_verification(config_dict,
+                                            models,
+                                            hidden_dim,
+                                            tmpdir,
+                                            load_module_only=True,
+                                            fp16=fp16,
+                                            compile_mode=compile_mode)
 
     @pytest.mark.parametrize('zero_stage', [1, 2])
-    def test_save_exclude_frozen_weights(self, tmpdir, zero_stage):
+    def test_save_exclude_frozen_weights(self, tmpdir, zero_stage, compile_mode):
         world_size = 1
         config_dict = {
             "train_micro_batch_size_per_gpu": 1,
@@ -510,13 +626,15 @@ def test_save_exclude_frozen_weights(self, tmpdir, zero_stage):
             },
             "zero_optimization": {
                 "stage": zero_stage,
-            }
+            },
         }
         hidden_dim = 10
 
         model = SimpleFrozenModel(hidden_dim, empty_grad=False)
 
         ds_engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
+        if compile_mode:
+            ds_engine.compile()
 
         # Validate backwards-compatibility of including frozen parameters in checkpoint
         all_ckpt_folder = os.path.join(tmpdir, 'all_params')
@@ -545,7 +663,7 @@ def test_save_exclude_frozen_weights(self, tmpdir, zero_stage):
         assert loaded_trainable_param_names == trainable_param_names
 
     @pytest.mark.parametrize('zero_stage', [1, 2])
-    def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage):
+    def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage, compile_mode):
         world_size = 1
         config_dict = {
             "train_micro_batch_size_per_gpu": 1,
@@ -565,6 +683,8 @@ def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage):
         model = SimpleFrozenModel(hidden_dim, empty_grad=False)
 
         ds_engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
+        if compile_mode:
+            ds_engine.compile()
 
         # Validate custom state_dict model
         state_dict_bk = model.state_dict
@@ -589,9 +709,10 @@ def test_save_exclude_custom_frozen_weights(self, tmpdir, zero_stage):
 class TestSaveTensorClone(DistributedTest):
     world_size = 1
 
+    @pytest.mark.parametrize('compile_mode', [True, False])
     @pytest.mark.parametrize('zero_stage', [1, 2])
     @pytest.mark.parametrize('use_cpu_device', [True, False])
-    def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device):
+    def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device, compile_mode):
         ds_config = {
             "optimizer": {
@@ -603,11 +724,14 @@ def test_save_tensor_clone(self, tmpdir, zero_stage, use_cpu_device):
             "train_batch_size": 1,
             "train_micro_batch_size_per_gpu": 1
         }
+        dtype = torch.float16
         hidden_dim = 1024
-        model = SimpleModel(hidden_dim, nlayers=4).half()
+        model = SimpleModel(hidden_dim, nlayers=4).to(dtype=dtype)
         ref_model_state_dict = model.state_dict()
 
         ds_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config)
+        if compile_mode:
+            ds_engine.compile()
         clone_device = torch.device('cpu') if use_cpu_device else get_accelerator().current_device()
         clone_state_dict = clone_tensors_for_torch_save(ds_engine.module.state_dict())
         compare_state_dicts(ref_model_state_dict, clone_state_dict)
@@ -624,8 +748,9 @@ class TestZeRONonDistributed(DistributedTest):
     world_size = 1
     init_distributed = False
 
+    @pytest.mark.parametrize('compile_mode', [True, False])
     @pytest.mark.parametrize('zero_stage', [1, 2, 3])
-    def test_chmod_exception_handling(self, monkeypatch, zero_stage):
+    def test_chmod_exception_handling(self, monkeypatch, zero_stage, compile_mode):
         config_dict = {
             "optimizer": {
@@ -643,6 +768,8 @@ def test_chmod_exception_handling(self, monkeypatch, zero_stage):
                                                  model=net,
                                                  model_parameters=net.parameters())
+        if compile_mode:
+            engine.compile()
         log_called = False
 
         def mock_logger_info(message, *args, **kwargs):
diff --git a/tests/unit/ci_promote_marker.py
b/tests/unit/ci_promote_marker.py new file mode 100644 index 000000000000..7bd83195723a --- /dev/null +++ b/tests/unit/ci_promote_marker.py @@ -0,0 +1,606 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_ci_tests = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + 
"unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + "unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-noCG]" +] + +hpu_ci_tests_4cards = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + 
"unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + "unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + "unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + 
"unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + "unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[gpt2-text-generation-fp16-noCG]", + "unit/comm/test_dist.py::TestDistInitNoEnv::test", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_throughput_calculation", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-zero1]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-fp32-zero1]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1]", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2]", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config0]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]", + "unit/runtime/half_precision/test_fp16.py::TestFP16OptimizerForMoE::test_unfused_gradnorm", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-350m-fp16]" +] + +hpu_promote_tests = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + 
"unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-15]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask1]", + 
"unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + 
"unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]" +] + +hpu_promote_tests_4cards = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + 
"unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + "unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + "unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + 
"unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-fp16-zero1]", + 
"unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[None-bf16-zero2]", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp32-bf16-None]", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True]", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_base[topo_config2]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[bigscience/bloom-560m-fp16]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-125m-fp16]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-neo-125M-fp16]" +] + +gpu_ci_tests = [ + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_train_schedule_singlestage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_schedule_firststage", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_midstage[1]", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_enable_argument", + "unit/runtime/test_ds_config_model.py::test_config_base", + "unit/comm/test_dist.py::TestWorldSizeOverrideDistTest::test_world_size_1", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_211", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_122", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[2-2]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[33-33]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_baddim[0-0]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[1-1]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[32-32]", + "unit/runtime/zero/test_zero_tiled.py::test_tiled_init[5-5]", + 
"unit/runtime/half_precision/test_bf16.py::TestZeroAllowUntestedOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test", "unit/launcher/test_run.py::test_parser_local", + "unit/launcher/test_run.py::test_parser_mutual_exclusive", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupLR-params0]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[OneCycle-params2]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[LRRangeTest-params3]", + "unit/runtime/test_lr_schedulers.py::TestSchedulerOptimizerParity::test[WarmupDecayLR-params1]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0.1-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-211]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.1-0-10-0]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[0.001-0.1-0-21-21]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0-210]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_mom[0.08-0.09-0.001-101]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-100]", + "unit/runtime/test_lr_schedulers.py::TestOneCycle::test_lr[1e-05-0.01-0.001-10-101]", + "unit/runtime/utils/test_partition.py::test_float_balanced", + "unit/runtime/utils/test_partition.py::test_int_balanced", + "unit/runtime/utils/test_partition.py::test_easy_balance_uniform", + "unit/runtime/utils/test_partition.py::test_float_midheavy", + "unit/runtime/utils/test_partition.py::test_short_partition_uniform", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings2]", + "unit/autotuning/test_autotuning.py::test_command_line", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings4]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings3]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[None]", + "unit/autotuning/test_autotuning.py::test_resource_manager_arg_mappings[arg_mappings1]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model", + "unit/runtime/zero/test_zero_config.py::test_zero_config_deprecatedfields", + "unit/runtime/zero/test_zero_config.py::test_zero_config_aliasfields", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_output[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensor::test_ckpt_non_tensor_input[None]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestCheckpointNonTensorOutputOrdering::test_ckpt_non_tensor_output_ordering[non_tensor_output3]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[optimizer_type2]", + "unit/elasticity/test_elastic.py::test_proper_mbsz", "unit/runtime/pipe/test_topology.py::test_topology_rank_repr", + "unit/runtime/pipe/test_topology.py::test_topology_2d", "unit/runtime/pipe/test_topology.py::test_primes", + "unit/runtime/sparse_tensor/test_csr.py::test_csr_addition_different", + 
"unit/utils/test_get_optim_files.py::test_get_optim_files[2]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[12]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[24]", + "unit/utils/test_get_optim_files.py::test_get_optim_files[1]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-fp16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-fp16-noCG]" +] + +gpu_promote_tests = [ + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-False-resulting_optimizer9]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-False-resulting_optimizer3]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-True-True-resulting_optimizer13]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-False-resulting_optimizer1]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-True-True-resulting_optimizer7]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-True-True-resulting_optimizer5]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-True-resulting_optimizer15]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-True-False-resulting_optimizer11]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-False-resulting_optimizer2]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-True-resulting_optimizer14]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-True-False-True-resulting_optimizer6]", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-True-False-False-resulting_optimizer10]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[Adam]", + "unit/runtime/half_precision/test_bf16.py::TestZeroSupportedClientOptimizer::test[FusedAdam]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-1]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[Adam-3]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-2]", + "unit/runtime/half_precision/test_fp16.py::TestZeroSupportedClientOptimizer::test[FusedAdam-3]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]", + 
"unit/compression/test_compression.py::TestCompression::test_mpu_compress", + "unit/launcher/test_run.py::test_parser_errors", "unit/launcher/test_run.py::test_num_plus_parser", + "unit/launcher/test_run.py::test_parser_multinode", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-19]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-33]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-10]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[linear-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_schedule[log-15]", + "unit/runtime/test_lr_schedulers.py::TestLrSchedule::test_lr_warmup_decay_schedule[linear-15]", + "unit/runtime/pipe/test_topology.py::test_topology_3d", + "unit/runtime/pipe/test_topology.py::test_topology_comm_list", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bfloat16]", + "unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled[bf16]", + "unit/runtime/test_ds_config_dict.py::TestNoModel::test", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs3[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs1_outputs1[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs1[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask0]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_arg_none[mask1]", + "unit/runtime/activation_checkpointing/test_activation_checkpointing.py::TestActivationCheckpoint::test_ckpt_inputs2_outputs2[mask1]", + 
"unit/launcher/test_ds_arguments.py::test_core_deepscale_arguments", + "unit/launcher/test_ds_arguments.py::test_no_ds_arguments_no_ds_parser", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest", + "unit/compression/test_compression.py::TestCompression::test_conv1d_convertion", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[optimizer_type2-scheduler_type2]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-scheduler_type2]", + "unit/runtime/utils/test_partition.py::test_balance_bert", + "unit/runtime/zero/test_zero_config.py::test_zero_config_offload_configs", + "unit/runtime/zero/test_zero_config.py::test_zero_offload_optimizer_config_pipeline", + "unit/runtime/test_pld.py::test_pld_schedule[0]", "unit/runtime/test_pld.py::test_pld_schedule[0.9]", + "unit/runtime/test_pld.py::test_pld_schedule[1.0]", "unit/runtime/test_pld.py::test_pld_schedule[0.1]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources3]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources1]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources2]", + "unit/autotuning/test_autotuning.py::test_autotuner_resources[active_resources0]", + "unit/elasticity/test_elastic.py::test_basic_10k", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_111", + "unit/checkpoint/test_reshape_checkpoint.py::test_reshape_222_to_121", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict0]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict1]", + "unit/runtime/test_ds_config_model.py::test_config_base_literalfail[config_dict2]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[3]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[10]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_laststage[8]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[1]", + "unit/runtime/pipe/test_pipe_schedule.py::test_pipe_inference_schedule_firststage[3]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG]", + 
"unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG]", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG]", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False-roberta-base-fill-mask]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]", + "unit/runtime/half_precision/test_bf16.py::TestZeroEmptyGrad::test", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-CG]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-CG]" +] diff --git a/tests/unit/common.py b/tests/unit/common.py index 76bebf6b725a..081eef8e90f4 100644 --- a/tests/unit/common.py +++ b/tests/unit/common.py @@ -23,7 +23,7 @@ from _pytest.fixtures import FixtureLookupError, FixtureFunctionMarker # Worker timeout for tests that hang -DEEPSPEED_TEST_TIMEOUT = 600 +DEEPSPEED_TEST_TIMEOUT = int(os.environ.get('DEEPSPEED_TEST_TIMEOUT', '600')) def is_rocm_pytorch(): @@ -59,6 +59,7 @@ def get_master_port(base_port=29500, port_range_size=1000): def set_accelerator_visible(): + # below function relevant for GPU cuda_visible = os.environ.get("CUDA_VISIBLE_DEVICES", None) xdist_worker_id = get_xdist_worker_id() if xdist_worker_id is None: @@ -84,13 +85,25 @@ def set_accelerator_visible(): elif get_accelerator().device_name() == 'npu': npu_smi = subprocess.check_output(['npu-smi', 'info', '-l']) num_accelerators = int(npu_smi.decode('utf-8').strip().split('\n')[0].split(':')[1].strip()) + elif get_accelerator().device_name() == 'hpu': + try: + hl_smi = subprocess.check_output(['hl-smi', "-L"]) + num_accelerators = re.findall(r"Module ID\s+:\s+(\d+)", hl_smi.decode()) + except FileNotFoundError: + sim_list = subprocess.check_output(['ls', '-1', '/dev/accel']) + num_accelerators = re.findall(r"accel(\d+)", sim_list.decode()) + num_accelerators = sorted(num_accelerators, key=int) + os.environ["HABANA_VISIBLE_MODULES"] = ",".join(num_accelerators) else: assert get_accelerator().device_name() == 'cpu' cpu_sockets = int( subprocess.check_output('cat /proc/cpuinfo | grep "physical id" | sort -u | wc -l', shell=True)) num_accelerators = cpu_sockets - cuda_visible = ",".join(map(str, range(num_accelerators))) + if isinstance(num_accelerators, list): + cuda_visible = ",".join(num_accelerators) + else: + cuda_visible = ",".join(map(str, range(num_accelerators))) # rotate list based on xdist worker id, example below # wid=0 -> ['0', '1', '2', '3'] @@ -147,6 +160,10 @@ def _get_fixture_kwargs(self, request, func): return fixture_kwargs def _launch_daemonic_procs(self, num_procs): + if get_accelerator().device_name() == 'hpu': + if self.reuse_dist_env: + print("Ignoring reuse_dist_env for hpu") + self.reuse_dist_env = False # Create process pool or use cached one master_port = None if self.reuse_dist_env: @@ -170,8 +187,9 @@ def _launch_daemonic_procs(self, num_procs): # hang (causing super long unit test runtimes) pytest.exit("Test hanged, exiting", returncode=1) - # Tear down distributed environment and close process pools - self._close_pool(pool, num_procs) + finally: + # Tear down distributed 
environment and close process pools + self._close_pool(pool, num_procs) # If we skipped a test, propagate that to this process if any(skip_msgs): @@ -184,10 +202,13 @@ def _launch_non_daemonic_procs(self, num_procs): master_port = get_master_port() skip_msg = mp.Queue() # Allows forked processes to share pytest.skip reason processes = [] + prev_start_method = mp.get_start_method() + mp.set_start_method('spawn', force=True) for local_rank in range(num_procs): p = mp.Process(target=self._dist_run, args=(local_rank, num_procs, master_port, skip_msg)) p.start() processes.append(p) + mp.set_start_method(prev_start_method, force=True) # Now loop and wait for a test to complete. The spin-wait here isn't a big # deal because the number of processes will be O(#GPUs) << O(#CPUs). @@ -284,6 +305,7 @@ def _dist_run(self, local_rank, num_procs, master_port, skip_msg=""): def _dist_destroy(self): if (dist is not None) and dist.is_initialized(): dist.barrier() + # tear down after test completes dist.destroy_process_group() def _close_pool(self, pool, num_procs, force=False): @@ -424,7 +446,7 @@ def __call__(self, request): world_size = mark.args[0] break else: - world_size = self.world_size + world_size = self._fixture_kwargs.get("world_size", self.world_size) if isinstance(world_size, int): world_size = [world_size] diff --git a/tests/unit/compile_marker.py b/tests/unit/compile_marker.py new file mode 100644 index 000000000000..526b5c378ba5 --- /dev/null +++ b/tests/unit/compile_marker.py @@ -0,0 +1,346 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +compile_tests = [ + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]", + 
"unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype1]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype2]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype0]", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True] PASSED", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest[True] PASSED", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]", + 
"unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[WARN-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[IGNORE-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[FAIL-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]", + 
"unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[2-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[3-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeRONonDistributed::test_chmod_exception_handling[1-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_missing_latest[True]", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[WARN-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[FAIL-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unique_tag[IGNORE-True]", + "unit/checkpoint/test_tag_validation.py::TestCheckpointValidationTag::test_checkpoint_unknown_tag_validation[True]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype2]", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype2]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype1]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype0]", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype1]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-True-False-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-False-False-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-True-True-True]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-False-False-False-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-True-True-False]", + 
"unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-False-True-True]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-True-False-True-False]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-False-False-True-True]", + "unit/checkpoint/test_sparse.py::TestSparseCheckpoint::test_non_strict_load_sparse[True-False-False-True-False]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-False-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False-True]", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B-True]", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]", + 
"unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[fp32-roberta-True]", + "unit/inference/test_inference.py::TestInjectionPolicy::test[fp32-t5-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True]", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True]", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]", + 
"unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-125m-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-j-6B-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[EleutherAI/gpt-neo-125M-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShard::test[facebook/opt-350m-fp16-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B-True]", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m-True]", +] diff --git a/tests/unit/elasticity/test_elastic.py b/tests/unit/elasticity/test_elastic.py index a49ec595a420..dbd4f40c9b95 100644 --- a/tests/unit/elasticity/test_elastic.py +++ b/tests/unit/elasticity/test_elastic.py @@ -9,7 +9,7 @@ from deepspeed.git_version_info import version as ds_version import os from unit.simple_model import SimpleModel -from deepspeed.ops.op_builder import FusedAdamBuilder +from deepspeed.ops.op_builder import FusedAdamBuilder, FusedLambBuilder if not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) @@ -150,6 +150,7 @@ def test_proper_mbsz(ds_config): class TestNonElasticBatchParams(DistributedTest): world_size = 2 + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") def test(self): config_dict = { "train_batch_size": 2, @@ -182,6 +183,7 @@ def test(self): class TestNonElasticBatchParamsWithOverride(DistributedTest): world_size = 2 + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") def test(self): config_dict = { "train_batch_size": 2, @@ -213,6 +215,7 @@ def test(self): class TestElasticConfigChanged(DistributedTest): world_size = 2 + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") def test(self): config_dict = { "train_batch_size": 2, diff --git a/tests/unit/hybrid_engine/test_he_all.py b/tests/unit/hybrid_engine/test_he_all.py index aa1f120645b1..af336ebf8c2f 100644 --- a/tests/unit/hybrid_engine/test_he_all.py +++ b/tests/unit/hybrid_engine/test_he_all.py @@ -43,8 +43,10 @@ def get_model(self, model_name): model_config = AutoConfig.from_pretrained(model_name) model_config.dropout = 0.0 model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config) - model = model.half() - model = model.to(f'{get_accelerator().device_name()}:{local_rank}') + dev = get_accelerator().device_name() + dtype = torch.float16 + model = model.to(dtype=dtype) + model = model.to(f'{dev}:{local_rank}') return model def get_tokenizer(self, model_name): @@ -70,8 +72,8 @@ def test_correctness(self, batch_size, model_name): base_out = self._generate(model, tokenizer, prompt) ds_config = {"train_batch_size": 1, "fp16": {"enabled": True}, "hybrid_engine": {"enabled": True}} - model, *_ = deepspeed.initialize(model=model, config=ds_config) + model, *_ = deepspeed.initialize(model=model, config=ds_config) model.eval() ds1_out = 
self._generate(model, tokenizer, prompt) assert base_out == ds1_out, f"base_out: {base_out}, ds1_out: {ds1_out}" diff --git a/tests/unit/hybrid_engine/test_he_llama.py b/tests/unit/hybrid_engine/test_he_llama.py index fcf5b8ffb89b..716c327d53af 100644 --- a/tests/unit/hybrid_engine/test_he_llama.py +++ b/tests/unit/hybrid_engine/test_he_llama.py @@ -46,8 +46,10 @@ def get_model(self, model_name): model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config) # Make the model smaller so we can run it on a single GPU in CI _ = [model.model.layers.pop(-1) for _ in range(8)] - model = model.half() - model = model.to(f'{get_accelerator().device_name()}:{local_rank}') + dev = get_accelerator().device_name() + dtype = torch.float16 + model = model.to(dtype=dtype) + model = model.to(f'{dev}:{local_rank}') return model def get_tokenizer(self, model_name): diff --git a/tests/unit/hybrid_engine/test_he_lora.py b/tests/unit/hybrid_engine/test_he_lora.py index ea27239ed55e..0108d285d46f 100644 --- a/tests/unit/hybrid_engine/test_he_lora.py +++ b/tests/unit/hybrid_engine/test_he_lora.py @@ -15,6 +15,7 @@ import numpy.testing as npt from unit.common import DistributedTest from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.accelerator import get_accelerator if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) @@ -119,13 +120,17 @@ def only_optimize_lora_parameters(model): class TestHybridEngineLoRA(DistributedTest): world_size = 1 - def get_model(self, model_name): + def get_model(self, model_name, replace_fp16_to_bf16=False): local_rank = int(os.getenv("LOCAL_RANK", "0")) model_config = AutoConfig.from_pretrained(model_name) model_config.dropout = 0.0 model = AutoModelForCausalLM.from_pretrained(model_name, config=model_config) - model = model.half() - model = model.to(f'cuda:{local_rank}') + if replace_fp16_to_bf16: + model = model.bfloat16() + else: + model = model.half() + device = get_accelerator().device_name() + model = model.to(f'{device}:{local_rank}') return model def get_tokenizer(self, model_name): @@ -146,8 +151,9 @@ def get_train_sentences(self, batch_size): raise NotImplementedError(f"batch_size {batch_size} not implemented") def test_lora(self, batch_size, model_name, zero_stage, offload_device): + replace_fp16_to_bf16 = False local_rank = int(os.getenv("LOCAL_RANK", "0")) - model = self.get_model(model_name) + model = self.get_model(model_name, replace_fp16_to_bf16) tokenizer = self.get_tokenizer(model_name) train_sentences = self.get_train_sentences(batch_size) @@ -180,6 +186,9 @@ def test_lora(self, batch_size, model_name, zero_stage, offload_device): } } + if replace_fp16_to_bf16: + ds_config["fp16"]["enabled"] = False + ds_config["bf16"] = {"enabled": True} model, *_ = deepspeed.initialize(model=model, config=ds_config) # Verify gradient norm is larger than 0 @@ -190,7 +199,8 @@ def test_lora(self, batch_size, model_name, zero_stage, offload_device): model.train() batch = tokenizer(train_sentences, max_length=16, padding="max_length", truncation=True, return_tensors="pt") - batch = to_device(batch, f'cuda:{local_rank}') + device = get_accelerator().device_name() + batch = to_device(batch, f'{device}:{local_rank}') batch["labels"] = batch["input_ids"] outputs = model(**batch, use_cache=False) loss = outputs.loss diff --git a/tests/unit/inference/test_checkpoint_sharding.py b/tests/unit/inference/test_checkpoint_sharding.py index 
564b3fab6bf4..9ef45406d9e8 100644 --- a/tests/unit/inference/test_checkpoint_sharding.py +++ b/tests/unit/inference/test_checkpoint_sharding.py @@ -14,6 +14,7 @@ from huggingface_hub import snapshot_download from transformers.utils import is_offline_mode from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.accelerator import get_accelerator if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) @@ -44,6 +45,8 @@ def model_name(request): @pytest.fixture(params=[torch.float16, torch.int8], ids=["fp16", "int8"]) def dtype(request): + if request.param not in get_accelerator().supported_dtypes(): + pytest.skip(f"{request.param} not supported by {get_accelerator().device_name()}.") return request.param @@ -73,7 +76,9 @@ def run(self, model_name, class_tmpdir): class TestCheckpointShard(DistributedTest): world_size = 2 - def test(self, model_name, dtype, class_tmpdir, save_shard): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test(self, model_name, dtype, class_tmpdir, save_shard, compile_mode): + world_size = int(os.getenv("WORLD_SIZE", "1")) inf_config = { "replace_with_kernel_inject": True, @@ -92,6 +97,8 @@ def test(self, model_name, dtype, class_tmpdir, save_shard): model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16) model = model.eval() model = deepspeed.init_inference(model, config=inf_config) + if compile_mode: + model.compile() check_dtype(model, dtype) @@ -99,7 +106,8 @@ def test(self, model_name, dtype, class_tmpdir, save_shard): class TestCheckpointShardinAutoTP(DistributedTest): world_size = 2 - def test(self, model_name, class_tmpdir): + @pytest.mark.parametrize('compile_mode', [True, False]) + def test(self, model_name, class_tmpdir, compile_mode): def write_checkpoints_json(model_name, class_tmpdir): import json @@ -137,3 +145,5 @@ def write_checkpoints_json(model_name, class_tmpdir): model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16) model = model.eval() model = deepspeed.init_inference(model, config=inf_config) + if compile_mode: + model.compile() diff --git a/tests/unit/inference/test_inference.py b/tests/unit/inference/test_inference.py index f3056a225a9b..29549366f2e3 100644 --- a/tests/unit/inference/test_inference.py +++ b/tests/unit/inference/test_inference.py @@ -30,6 +30,7 @@ from deepspeed.ops.op_builder import OpBuilder from unit.common import DistributedTest +from transformers import BertLayer rocm_version = OpBuilder.installed_rocm_version() if rocm_version != (0, 0): @@ -67,6 +68,36 @@ "facebook/opt-125m", # 125m, 1.7B, ..., 175B variants have the same model architecture. "facebook/opt-350m", # 350m applies layer norm after attention layer which is different than other variants. 
 ]
+ModelsInjectionPolicyMap = {
+    "distilbert/distilbert-base-cased-distilled-squad": {
+        BertLayer: ("output_layer_norm", )
+    },
+    "openai-community/gpt2": {
+        BertLayer: ("mlp", )
+    },
+    "distilbert/distilgpt2": {
+        BertLayer: ("mlp", )
+    },
+    "Norod78/hebrew-bad_wiki-gpt_neo-tiny": {
+        BertLayer: ("out_proj", )
+    },
+    "EleutherAI/gpt-j-6b": {
+        BertLayer: ("mlp", )
+    },
+    "EleutherAI/pythia-70m-deduped": {
+        BertLayer: ("mlp", )
+    },
+    "bigscience/bloom-560m": {
+        BertLayer: ("mlp", )
+    },
+    "facebook/opt-125m": {
+        BertLayer: ("out_proj", )
+    },
+    "facebook/opt-350m": {
+        BertLayer: ("out_proj", )
+    },
+}
+DEFAULT_INJECTION_POLICY = {BertLayer: ("output.dense", )}
 _test_models = set(_bert_models + _roberta_models + _gpt_models + _opt_models)
 _test_tasks = [
     "fill-mask", "question-answering", "text-classification", "token-classification", "text-generation",
@@ -84,9 +115,10 @@ class ModelInfo:
 def _hf_model_list() -> List[ModelInfo]:
     """ Caches HF model list to avoid repeated API calls """
-    cache_dir = os.getenv("TRANSFORMERS_CACHE", "~/.cache/huggingface")
+    cache_dir = os.getenv("HF_HOME", "~/.cache/huggingface")
     cache_file_path = os.path.join(cache_dir, "DS_model_cache.pkl")
-    cache_expiration_seconds = 60 * 60 * 24  # 1 day
+    num_days = int(os.getenv("HF_CACHE_EXPIRY_DAYS", 1))
+    cache_expiration_seconds = num_days * 60 * 60 * 24
     # Load or initialize the cache
     model_data = {"cache_time": 0, "model_list": []}
@@ -97,7 +129,8 @@ def _hf_model_list() -> List[ModelInfo]:
     current_time = time.time()
     # Update the cache if it has expired
-    if (model_data["cache_time"] + cache_expiration_seconds) < current_time:
+    if ((model_data["cache_time"] + cache_expiration_seconds) < current_time) or os.getenv("FORCE_UPDATE_HF_CACHE",
+                                                                                           default=False):
         api = HfApi()
         model_data["model_list"] = [
             ModelInfo(modelId=m.modelId, pipeline_tag=m.pipeline_tag, tags=m.tags) for m in api.list_models()
@@ -125,7 +158,8 @@ def _hf_model_list() -> List[ModelInfo]:
 pytest.mt_names = [f"{m}-{t}" for m, t in pytest.model_w_tasks]
-@pytest.fixture(scope="module", autouse=True)
+# Workaround: the Hugging Face model listing changed and some _test_models entries are no longer found in _hf_model_names, so this fixture was changed from autouse=True to autouse=False.
+@pytest.fixture(scope="module", autouse=False) def verify_models(): # Verify all test models are registered in HF _test_models_not_found = [m for m in _test_models if m not in _hf_model_names] @@ -162,6 +196,11 @@ def enable_triton(request): return request.param +@pytest.fixture(params=[1, 2], ids=["ws1", "ws2"]) +def world_size(request): + return request.param + + """ Fixtures for running query """ @@ -279,7 +318,7 @@ def verify_injection(module): def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): model, task = model_w_task msg = "" - if enable_cuda_graph and (torch_info["cuda_version"] == "0.0"): + if enable_cuda_graph and (torch_info["cuda_version"] == "0.0") and get_accelerator().device_name() != 'hpu': msg = "CUDA not detected, cannot use CUDA Graph" elif enable_cuda_graph and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"): msg = "CUDA Graph is only available in torch versions >= 1.10" @@ -296,6 +335,8 @@ def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): msg = f"Bloom models only support half precision, cannot use dtype {dtype}" elif (model not in _bert_models + _roberta_models) and enable_cuda_graph: msg = "Non bert/roberta models do no support CUDA Graph" + elif not get_accelerator().is_triton_supported() and enable_triton: + msg = f"Triton is not supported for {get_accelerator().device_name()}." elif enable_triton and not (dtype in [torch.half]): msg = "Triton is for fp16" elif enable_triton and not deepspeed.HAS_TRITON: @@ -311,7 +352,9 @@ def validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton): return msg -@pytest.mark.inference +@pytest.mark.parametrize('compile_mode', [True, False]) +@pytest.mark.parametrize("replace_with_kernel_inject", [True, False]) +@pytest.mark.nightly class TestModelTask(DistributedTest): world_size = 1 @@ -324,6 +367,8 @@ def test( query, inf_kwargs, assert_fn, + replace_with_kernel_inject, + compile_mode, perf_meas=True, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph, enable_triton) @@ -366,11 +411,20 @@ def test( 'use_triton': enable_triton, 'triton_autotune': False, } + if not replace_with_kernel_inject: + if get_accelerator().device_name() != 'hpu': + pytest.skip("Kernel Inject False validation for HPU tests.", ) + injection_policy = ModelsInjectionPolicyMap.get(model, DEFAULT_INJECTION_POLICY) + args['injection_policy'] = injection_policy + args['replace_with_kernel_inject'] = False if pipe.tokenizer.model_max_length < deepspeed.ops.transformer.inference.config.DeepSpeedInferenceConfig( ).max_out_tokens: args.update({'max_out_tokens': pipe.tokenizer.model_max_length}) pipe.model = deepspeed.init_inference(pipe.model, **args) - check_injection(pipe.model) + if compile_mode: + pipe.model.compile() + if replace_with_kernel_inject: + check_injection(pipe.model) # Warm-up queries for perf measurement #for i in range(10): # _ = pipe(query, **inf_kwargs) @@ -397,6 +451,7 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.seq_inference @pytest.mark.parametrize("model_w_task", [("EleutherAI/gpt-neo-1.3B", "text-generation"), ("EleutherAI/gpt-neox-20b", "text-generation"), @@ -413,6 +468,7 @@ def test( query, inf_kwargs, assert_fn, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -428,11 +484,12 @@ def test( # enough GPU memory pipe = pipeline(task, model=model, 
device=torch.device("cpu"), framework="pt") bs_output = pipe(query, **inf_kwargs) - pipe.model = deepspeed.init_inference(pipe.model, mp_size=self.world_size, dtype=dtype, replace_with_kernel_inject=True) + if compile_mode: + pipe.model.compile() check_injection(pipe.model) # Switch device to GPU so that input tensors are not on CPU pipe.device = torch.device(get_accelerator().device_name(local_rank)) @@ -443,6 +500,7 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.inference @pytest.mark.parametrize("model_w_task", [("openai-community/gpt2", "text-generation")], ids=["gpt2"]) class TestLowCpuMemUsage(DistributedTest): @@ -454,27 +512,34 @@ def test( query, inf_kwargs, assert_fn, + compile_mode, ): model, task = model_w_task dtype = torch.float16 + if os.getenv("REPLACE_FP16", default=None): + dtype = torch.bfloat16 if dtype not in get_accelerator().supported_dtypes(): pytest.skip(f"Acceleraor {get_accelerator().device_name()} does not support {dtype}.") local_rank = int(os.getenv("LOCAL_RANK", "0")) - - pipe = pipeline(task, model=model, model_kwargs={"low_cpu_mem_usage": True}, device=local_rank, framework="pt") + device = local_rank + if get_accelerator().device_name() != 'cuda': + device = torch.device(f"{get_accelerator().device_name()}") + pipe = pipeline(task, model=model, model_kwargs={"low_cpu_mem_usage": True}, device=device, framework="pt") bs_output = pipe(query, **inf_kwargs) pipe.model = deepspeed.init_inference(pipe.model, mp_size=self.world_size, dtype=dtype, replace_method="auto", replace_with_kernel_inject=True) - + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.seq_inference @pytest.mark.parametrize( "model_w_task, injection_policy", @@ -490,7 +555,6 @@ def test( ) @pytest.mark.parametrize("dtype", [torch.float], ids=["fp32"]) class TestInjectionPolicy(DistributedTest): - world_size = [1, 2] def test( self, @@ -500,6 +564,7 @@ def test( inf_kwargs, assert_fn, dtype, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -507,7 +572,6 @@ def test( model, task = model_w_task local_rank = int(os.getenv("LOCAL_RANK", "0")) - world_size = int(os.getenv("WORLD_SIZE", "2")) pipe = pipeline(task, model=model, @@ -519,6 +583,8 @@ def test( mp_size=world_size, dtype=dtype, injection_policy=injection_policy) + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) print(local_rank, "baseline", bs_output) @@ -526,6 +592,7 @@ def test( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.seq_inference @pytest.mark.parametrize( "model_w_task", @@ -543,6 +610,7 @@ def test( inf_kwargs, assert_fn, dtype, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if invalid_test_msg: @@ -566,6 +634,8 @@ def test( bs_output = pipe(query, **inf_kwargs) pipe.model = deepspeed.init_inference(pipe.model, mp_size=world_size, dtype=dtype) + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) print(local_rank, "baseline", bs_output) @@ -580,6 +650,7 @@ def test_odd_world_size( inf_kwargs, assert_fn, dtype, + compile_mode, ): invalid_test_msg = validate_test(model_w_task, dtype, enable_cuda_graph=False, enable_triton=False) if 
invalid_test_msg: @@ -598,6 +669,8 @@ def test_odd_world_size( bs_output = pipe(query, **inf_kwargs) pipe.model = deepspeed.init_inference(pipe.model, mp_size=world_size, dtype=dtype) + if compile_mode: + pipe.model.compile() ds_output = pipe(query, **inf_kwargs) print(local_rank, "baseline", bs_output) @@ -605,6 +678,7 @@ def test_odd_world_size( assert assert_fn(bs_output, ds_output) +@pytest.mark.parametrize('compile_mode', [True, False]) @pytest.mark.nightly @pytest.mark.parametrize( "model_family, model_name", @@ -619,7 +693,7 @@ class TestLMCorrectness(DistributedTest): world_size = 1 exec_timeout = 1200 # Give these tests longer to complete - def test(self, model_family, model_name, task): + def test(self, model_family, model_name, task, compile_mode): # imports here to avoid import errors when pytest collects tests import lm_eval import lm_eval.models @@ -648,13 +722,22 @@ def no_pool_bootstrap_stderr(f, xs, iters): if 'gpt-j-6b' in model_name: dtype = torch.half + if os.getenv("REPLACE_FP16", default=None): + dtype = torch.bfloat16 lm = lm_eval.models.get_model(model_family).create_from_arg_string(f"pretrained={model_name}", {"device": "cpu"}) - setattr(lm, model_family, getattr(lm, model_family).half().to(device)) + setattr(lm, model_family, getattr(lm, model_family).to(dtype=dtype).to(device)) lm._device = device else: - lm = lm_eval.models.get_model(model_family).create_from_arg_string( - f"pretrained={model_name}", {"device": get_accelerator().device_name()}) + if get_accelerator().device_name() == 'hpu': + #lm_eval not supporting HPU device, so get model with CPU and move it to HPU. + lm = lm_eval.models.get_model(model_family).create_from_arg_string(f"pretrained={model_name}", + {"device": "cpu"}) + setattr(lm, model_family, getattr(lm, model_family).to(device)) + lm._device = device + else: + lm = lm_eval.models.get_model(model_family).create_from_arg_string( + f"pretrained={model_name}", {"device": get_accelerator().device_name()}) get_accelerator().synchronize() start = time.time() @@ -670,6 +753,8 @@ def no_pool_bootstrap_stderr(f, xs, iters): replace_with_kernel_inject=True, enable_cuda_graph=False, ) + if compile_mode: + ds_model.compile() check_injection(ds_model) setattr(lm, model_family, ds_model) get_accelerator().synchronize() diff --git a/tests/unit/inference/test_inference_config.py b/tests/unit/inference/test_inference_config.py index 39d62d17372c..7172ee2a7f21 100644 --- a/tests/unit/inference/test_inference_config.py +++ b/tests/unit/inference/test_inference_config.py @@ -17,7 +17,6 @@ class TestInferenceConfig(DistributedTest): def test_overlap_kwargs(self): config = {"replace_with_kernel_inject": True, "dtype": torch.float32} kwargs = {"replace_with_kernel_inject": True} - engine = deepspeed.init_inference(torch.nn.Module(), config=config, **kwargs) assert engine._config.replace_with_kernel_inject diff --git a/tests/unit/inference/test_model_profiling.py b/tests/unit/inference/test_model_profiling.py index 23e49f89025b..9ae96ed12fcf 100644 --- a/tests/unit/inference/test_model_profiling.py +++ b/tests/unit/inference/test_model_profiling.py @@ -16,6 +16,9 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("This op had not been implemented on this system.", allow_module_level=True) +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) + @pytest.mark.inference @pytest.mark.parametrize("use_cuda_events", [True, 
False]) @@ -31,7 +34,6 @@ def test(self, enable_cuda_graph, use_cuda_events): local_rank = int(os.getenv("LOCAL_RANK", "0")) world_size = int(os.getenv("WORLD_SIZE", "1")) - pipe = pipeline(task, model, framework="pt", device=get_accelerator().device_name(local_rank)) pipe.model = deepspeed.init_inference(pipe.model, dtype=dtype, diff --git a/tests/unit/inference/test_stable_diffusion.py b/tests/unit/inference/test_stable_diffusion.py index ac39b7ab12fa..d71d947c3b4b 100644 --- a/tests/unit/inference/test_stable_diffusion.py +++ b/tests/unit/inference/test_stable_diffusion.py @@ -20,22 +20,23 @@ class TestStableDiffusion(DistributedTest): def test(self): from diffusers import DiffusionPipeline from image_similarity_measures.quality_metrics import rmse - generator = torch.Generator(device=get_accelerator().current_device()) + dev = get_accelerator().device_name() + generator = torch.Generator(device=dev) seed = 0xABEDABE7 generator.manual_seed(seed) prompt = "a dog on a rocket" model = "prompthero/midjourney-v4-diffusion" local_rank = int(os.getenv("LOCAL_RANK", "0")) - device = torch.device(f"cuda:{local_rank}") - - pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.half) + device = torch.device(f"{dev}:{local_rank}") + dtype = torch.half + pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=dtype) pipe = pipe.to(device) baseline_image = pipe(prompt, guidance_scale=7.5, generator=generator).images[0] pipe = deepspeed.init_inference( pipe, mp_size=1, - dtype=torch.half, + dtype=dtype, replace_with_kernel_inject=True, enable_cuda_graph=True, ) diff --git a/tests/unit/inference/v2/inference_test_utils.py b/tests/unit/inference/v2/inference_test_utils.py index d63c51267e51..9405b6fde724 100644 --- a/tests/unit/inference/v2/inference_test_utils.py +++ b/tests/unit/inference/v2/inference_test_utils.py @@ -44,3 +44,10 @@ def allclose(x, y, tolerances: Tuple[int, int] = None): else: rtol, atol = tolerances return torch.allclose(x, y, rtol=rtol, atol=atol) + + +def skip_on_inference_v2(): + if get_accelerator().device_name() == 'hpu': + return True + else: + return False diff --git a/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py b/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py index 376188b92565..49dbdc715556 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_bias_activation.py @@ -11,7 +11,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum from deepspeed.inference.v2.kernels.core_ops import CUDABiasActivation -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_bias_act_implementation(input: torch.Tensor, bias: Optional[torch.Tensor], diff --git a/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py b/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py index 864db6204a16..9d8d2c177607 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_blas_linear.py @@ -10,7 +10,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import BlasLibLinear -from ....v2.inference_test_utils import allclose +from 
....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') # Note: only testing with FP16 and BF16 because we use TF32 on Ampere and we don't have a good # set of tolerances. Since this is just on top of BLAS though, the test is more about diff --git a/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py b/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py index 8cb95a6cdcba..4232cb7a6324 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_gated_activation.py @@ -11,7 +11,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAGatedActivation from deepspeed.inference.v2.inference_utils import ActivationType -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_geglu_implementation(input: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py b/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py index 0b489894bb9b..0549316081ee 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_post_ln.py @@ -8,7 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAFPPostLN -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py b/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py index ffb748e57af2..4da5173d5f53 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_pre_ln.py @@ -8,7 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.core_ops import CUDAFPPreLN -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py b/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py index 63b16da171c9..16357d0f2967 100644 --- a/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py +++ b/tests/unit/inference/v2/kernels/core_ops/test_rms_norm.py @@ -9,7 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.core_ops import CUDARMSNorm, CUDARMSPreNorm -from ....v2.inference_test_utils import get_dtypes, allclose +from ....v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = 
pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_rms_norm(vals: torch.Tensor, gamma: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor: diff --git a/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py b/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py index ed76dabe1f4c..c396cc1268d0 100644 --- a/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py +++ b/tests/unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py @@ -9,7 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum from deepspeed.inference.v2.kernels.cutlass_ops import MoEGEMM -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') SINGLE_EXPERT_CASES = [(13, 2048, 2048), (256, 1024, 4096), (278, 5120, 2048), (893, 5120, 2560)] diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py b/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py index a33c938a0608..ad88ff2c5d69 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_atom_builder.py @@ -6,8 +6,13 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import AtomBuilder from .ragged_testing_utils import build_complex_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') Q_BLOCK_SIZE = 128 KV_BLOCK_SIZE = 128 diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py index ce5a178c9548..bb0192bbbde6 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py @@ -23,7 +23,10 @@ from deepspeed.ops.op_builder import RaggedUtilsBuilder from .ragged_testing_utils import build_batch_and_manager -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') try: from flash_attn.flash_attn_interface import flash_attn_varlen_func diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py index 5f1ef930952c..03562e48390c 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py @@ -9,6 +9,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import LinearBlockedKVCopy from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2_ops diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py 
b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py index 156be9929d92..06d67777e65d 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.kernels.ragged_ops import BlockedRotaryEmbeddings, BlockedTrainedRotaryEmbeddings from deepspeed.inference.v2.ragged import RaggedBatchWrapper, DSSequenceDescriptor from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache -from ....v2.inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ NOTE(cmikeh2): It is very possible to see unit test failures (even on FP16) depending on when certain values are casted up to or down from float32. If we are seeing accuracy issues, we should diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py b/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py index 1feefa9ee588..e00aa85d194c 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_logits_gather.py @@ -10,9 +10,12 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import RaggedLogitsGather -from ....v2.inference_test_utils import allclose, get_dtypes +from ....v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 from .ragged_testing_utils import build_simple_batch +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') + def baseline_implementation(hidden_states: torch.Tensor, seq_lens: List[int]) -> torch.Tensor: output = torch.empty((len(seq_lens), hidden_states.shape[1]), diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py index 3907fc3e3a4b..6538a81ec00a 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_gather.py @@ -14,6 +14,10 @@ RaggedTopKGating, ) from .ragged_testing_utils import build_simple_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ For simplicity's sake, these tests do rely on ``RaggedTopKGating`` and ``MoEScatter`` to produce correct inputs. 
If either of these kernels is broken diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py index aae459f06a6f..9edc014eae33 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py @@ -10,6 +10,10 @@ from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.ragged_ops import MoEScatter, RaggedTopKGating from .ragged_testing_utils import build_simple_batch +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') """ For simplicity's sake, these tests do rely on ``RaggedTopKGating`` to produce correct inputs. If ``RaggedTopKGating`` is broken, these tests will fail, so double check diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py b/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py index f179f62a9b12..32d7d312a4cf 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py @@ -10,9 +10,12 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.kernels.ragged_ops import RaggedEmbeddingKernel -from ....v2.inference_test_utils import allclose, get_dtypes +from ....v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 from .ragged_testing_utils import build_batch_and_manager +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') + def baseline_implementation(token_ids: torch.Tensor, embedding_table: torch.Tensor, diff --git a/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py b/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py index 5fa0c8a079f0..178512351c0f 100644 --- a/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py +++ b/tests/unit/inference/v2/kernels/ragged_ops/test_top_k_gating.py @@ -11,7 +11,10 @@ from deepspeed.inference.v2.inference_utils import DtypeEnum from deepspeed.inference.v2.kernels.ragged_ops import RaggedTopKGating from .ragged_testing_utils import build_simple_batch -from ...inference_test_utils import allclose +from ....v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def _top_k_gating_testing_helper(n_tokens: int, n_experts: int, n_top_k: int, seed: int = 0xC0FFEE) -> None: diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py index 52ff0e134dfc..901d9d9b43e7 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py @@ -15,6 +15,10 @@ ) from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import SimpleParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class TransformerLayerContainer(LayerContainer): diff --git 
a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py index 07ad87e6168d..c457227d5499 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import SimpleParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class ParentLayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py index 52313cb6f202..0701b8dcc4d8 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.allocator import on_device from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class MultiDependencyContainer(ParameterBase): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py index b319bf6de4ad..e7ba08b3c2a8 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py @@ -6,10 +6,15 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import validate_device, SimpleParam, ListParam, DummyInferenceModel +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class MultiParameterLayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py index 06ff9047d648..5f39d3251ea9 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py @@ -6,6 +6,7 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.allocator import on_device from deepspeed.inference.v2.inference_parameter import 
InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList @@ -13,6 +14,10 @@ from deepspeed.inference.v2.model_implementations.common_parameters import * from .utils import validate_device +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') class SimpleMoELayer(LayerContainer): diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py index 850c4c24fde6..fb7901dbf938 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_attn_out_sharding.py @@ -8,6 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') # None of the logic should be dependent on head size. HEAD_SIZE = 64 diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py index aac7e5391d8f..553d604d30ee 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_mlp_sharding.py @@ -8,6 +8,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def round_up_to_256(x: int) -> int: diff --git a/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py b/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py index 9a1cb9c09c64..86575d2176ad 100644 --- a/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py +++ b/tests/unit/inference/v2/model_implementations/sharding/test_qkv_sharding.py @@ -10,6 +10,10 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.model_implementations.sharding import * +from ....v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def fill_with_head_ids(head_size: int, n_heads_q: int, n_heads_kv: Optional[int] = None) -> torch.Tensor: diff --git a/tests/unit/inference/v2/modules/test_blas_linear_module.py b/tests/unit/inference/v2/modules/test_blas_linear_module.py index f4d0b1991238..b50819875699 100644 --- a/tests/unit/inference/v2/modules/test_blas_linear_module.py +++ b/tests/unit/inference/v2/modules/test_blas_linear_module.py @@ -13,7 +13,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSLinearConfig from deepspeed.inference.v2.modules.interfaces import DSLinearRegistry -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not 
supported by {get_accelerator().device_name()}.') def reference_implementation(hidden_states: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor], diff --git a/tests/unit/inference/v2/modules/test_blocked_attn.py b/tests/unit/inference/v2/modules/test_blocked_attn.py index 6556aa460a44..b55909a69caf 100644 --- a/tests/unit/inference/v2/modules/test_blocked_attn.py +++ b/tests/unit/inference/v2/modules/test_blocked_attn.py @@ -16,7 +16,10 @@ from deepspeed.inference.v2.modules.interfaces import DSSelfAttentionRegistry, DSSelfAttentionBase from ..kernels.ragged_ops.ragged_testing_utils import build_batch_and_manager -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') try: from flash_attn.flash_attn_interface import flash_attn_varlen_func diff --git a/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py b/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py index 386f3b3ef0b3..dc971f83f0c2 100644 --- a/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py +++ b/tests/unit/inference/v2/modules/test_cuda_pre_ln_module.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_custom_module.py b/tests/unit/inference/v2/modules/test_custom_module.py index eb54b7a913f2..b813b715ec1e 100644 --- a/tests/unit/inference/v2/modules/test_custom_module.py +++ b/tests/unit/inference/v2/modules/test_custom_module.py @@ -11,7 +11,10 @@ from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.implementations import cuda_post_ln -from ...v2.inference_test_utils import allclose +from ...v2.inference_test_utils import allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_cutlass_moe.py b/tests/unit/inference/v2/modules/test_cutlass_moe.py index b14ba127c6be..27ba70b88728 100644 --- a/tests/unit/inference/v2/modules/test_cutlass_moe.py +++ b/tests/unit/inference/v2/modules/test_cutlass_moe.py @@ -15,7 +15,10 @@ from deepspeed.inference.v2.modules.interfaces import DSMoERegistry from ..kernels.ragged_ops.ragged_testing_utils import build_simple_batch -from ...v2.inference_test_utils import allclose, get_dtypes +from ...v2.inference_test_utils import allclose, get_dtypes, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def _gating_reference(logits: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: diff --git 
a/tests/unit/inference/v2/modules/test_post_ln_module.py b/tests/unit/inference/v2/modules/test_post_ln_module.py index f9dcfd272170..538eb32c3c85 100644 --- a/tests/unit/inference/v2/modules/test_post_ln_module.py +++ b/tests/unit/inference/v2/modules/test_post_ln_module.py @@ -10,7 +10,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/modules/test_pre_rms_module.py b/tests/unit/inference/v2/modules/test_pre_rms_module.py index bbd108a35a5a..58bf7761bafa 100644 --- a/tests/unit/inference/v2/modules/test_pre_rms_module.py +++ b/tests/unit/inference/v2/modules/test_pre_rms_module.py @@ -12,7 +12,10 @@ from deepspeed.inference.v2.modules import ConfigBundle from deepspeed.inference.v2.modules.configs import DSNormConfig from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ...v2.inference_test_utils import get_dtypes, allclose +from ...v2.inference_test_utils import get_dtypes, allclose, skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, diff --git a/tests/unit/inference/v2/ragged/test_blocked_allocator.py b/tests/unit/inference/v2/ragged/test_blocked_allocator.py index 4596e81c5652..6dddeff6ee9f 100644 --- a/tests/unit/inference/v2/ragged/test_blocked_allocator.py +++ b/tests/unit/inference/v2/ragged/test_blocked_allocator.py @@ -9,7 +9,12 @@ import pytest import torch +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.ragged.blocked_allocator import BlockedAllocator +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/ragged/test_manager_configs.py b/tests/unit/inference/v2/ragged/test_manager_configs.py index a5f270cced8c..c2dec7673308 100644 --- a/tests/unit/inference/v2/ragged/test_manager_configs.py +++ b/tests/unit/inference/v2/ragged/test_manager_configs.py @@ -7,7 +7,12 @@ from deepspeed.pydantic_v1 import ValidationError +from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.ragged import DSStateManagerConfig +from ...v2.inference_test_utils import skip_on_inference_v2 + +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/ragged/test_ragged_wrapper.py b/tests/unit/inference/v2/ragged/test_ragged_wrapper.py index 3cb74f4c49d2..669368f9ef9d 100644 --- a/tests/unit/inference/v2/ragged/test_ragged_wrapper.py +++ b/tests/unit/inference/v2/ragged/test_ragged_wrapper.py @@ -14,6 +14,10 @@ RaggedBatchWrapper, DSStateManagerConfig, ) +from ...v2.inference_test_utils import skip_on_inference_v2 
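A module-level `pytestmark` applies its marker to every test collected from the file, which is why each Inference V2 test module in this diff only needs the two added lines to be skipped wholesale on unsupported accelerators. A condensed sketch of the pattern, with the helper as defined in the `inference_test_utils.py` hunk earlier in this diff:

```python
import pytest

from deepspeed.accelerator import get_accelerator


def skip_on_inference_v2():
    # Per the inference_test_utils.py hunk: HPU opts out of the Inference V2 suite entirely.
    return get_accelerator().device_name() == 'hpu'


# Applies the skip condition to every test in the module.
pytestmark = pytest.mark.skipif(skip_on_inference_v2(),
                                reason=f'Inference V2 not supported by {get_accelerator().device_name()}.')
```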
+ +pytestmark = pytest.mark.skipif(skip_on_inference_v2(), + reason=f'Inference V2 not supported by {get_accelerator().device_name()}.') @pytest.mark.inference_v2 diff --git a/tests/unit/megatron_model.py b/tests/unit/megatron_model.py index 011ebaf4d3b9..58d5a1308d95 100644 --- a/tests/unit/megatron_model.py +++ b/tests/unit/megatron_model.py @@ -22,7 +22,7 @@ def get_megatron_version(): def get_gpt2_model(args_others, mp_size=1): - from megatron.model import GPT2Model + from megatron.model import GPT2Model # noqa: F401 from megatron.initialize import initialize_megatron args_defaults = { diff --git a/tests/unit/moe/test_moe.py b/tests/unit/moe/test_moe.py index 310a0df16381..9e3c9634eaf7 100644 --- a/tests/unit/moe/test_moe.py +++ b/tests/unit/moe/test_moe.py @@ -11,6 +11,7 @@ from unit.simple_model import SimplePRMoEModel, SimpleMoEModel, sequence_dataloader from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer, is_moe_param from deepspeed.runtime.utils import required_torch_version +from deepspeed.accelerator import get_accelerator @pytest.mark.parametrize("ep_size", [2, 4]) @@ -34,10 +35,13 @@ def test(self, ep_size, zero_stage, use_residual): } } hidden_dim = 16 + dtype = torch.half # E+D -- ep_size = 2 # E only -- ep_size = 4 model = SimpleMoEModel(hidden_dim, ep_size=ep_size, use_residual=use_residual) + #TODO SW-179530: remove workaround when issue with lazy mode is resolved (see SW-179530). + model.to(get_accelerator().device_name()) param_group = {'params': [p for p in model.parameters()], 'name': 'random-unique-name'} params = split_params_into_different_moe_groups_for_optimizer(param_group) optimizer = torch.optim.AdamW(params=params) @@ -47,7 +51,11 @@ def test(self, ep_size, zero_stage, use_residual): dist_init_required=False) #dist_init_required=False -- parameterize to True/False? 
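The data-loader change just below passes an explicit `dtype` so that the generated batches match the precision the fp16-configured engine runs in, instead of defaulting to fp32. A toy stand-in illustrating the intent (a hypothetical helper, not the actual `sequence_dataloader` from `unit.simple_model`):

```python
import torch


def toy_sequence_dataloader(total_samples, hidden_dim, device, dtype=torch.half, seq_len=8):
    # Sketch only: batches are materialized directly in the requested dtype so the
    # half-precision engine never receives fp32 inputs.
    inputs = torch.randn(total_samples, seq_len, hidden_dim, device=device, dtype=dtype)
    labels = torch.randn(total_samples, seq_len, hidden_dim, device=device, dtype=dtype)
    return [(inputs[i], labels[i]) for i in range(total_samples)]
```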
- data_loader = sequence_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = sequence_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) def strict_average_tensor(tensor): process_group = optimizer.dp_process_group @@ -116,6 +124,7 @@ def test(self, ep_size, use_residual): config_dict = {"train_batch_size": 8, "steps_per_print": 1, "fp16": {"enabled": True}} hidden_dim = 16 + dtype = torch.half # E+D -- ep_size = 2 # E only -- ep_size = 4 @@ -126,7 +135,11 @@ def test(self, ep_size, use_residual): optimizer=optimizer, dist_init_required=False) - data_loader = sequence_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = sequence_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) diff --git a/tests/unit/ops/adagrad/test_cpu_adagrad.py b/tests/unit/ops/adagrad/test_cpu_adagrad.py index 99e934e2efda..0c675ecd6a85 100644 --- a/tests/unit/ops/adagrad/test_cpu_adagrad.py +++ b/tests/unit/ops/adagrad/test_cpu_adagrad.py @@ -18,8 +18,8 @@ def check_equal(first, second, atol=1e-2, verbose=False): - x = first.detach().numpy() - y = second.detach().numpy() + x = first.detach().float().numpy() + y = second.detach().float().numpy() if verbose: print("x = {}".format(x.flatten())) print("y = {}".format(y.flatten())) diff --git a/tests/unit/ops/adam/test_adamw.py b/tests/unit/ops/adam/test_adamw.py index 3b1b088766a5..0b4aad451561 100644 --- a/tests/unit/ops/adam/test_adamw.py +++ b/tests/unit/ops/adam/test_adamw.py @@ -6,12 +6,12 @@ import deepspeed import torch import pytest - from deepspeed.ops.adam import FusedAdam from deepspeed.ops.adam import DeepSpeedCPUAdam from unit.common import DistributedTest from unit.simple_model import SimpleModel from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import FusedAdamBuilder if torch.half not in get_accelerator().supported_dtypes(): pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) @@ -67,6 +67,9 @@ def test(self, "cpu_offload": zero_offload } } + if (resulting_optimizer[0] == FusedAdam) and (not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]): + pytest.skip("FusedAdam is not compatible") + model = SimpleModel(10) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, diff --git a/tests/unit/ops/adam/test_cpu_adam.py b/tests/unit/ops/adam/test_cpu_adam.py index 9a6ff6689446..2e232ca7477b 100644 --- a/tests/unit/ops/adam/test_cpu_adam.py +++ b/tests/unit/ops/adam/test_cpu_adam.py @@ -11,7 +11,7 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.adam import FusedAdam -from deepspeed.ops.op_builder import CPUAdamBuilder +from deepspeed.ops.op_builder import CPUAdamBuilder, FusedAdamBuilder from unit.common import DistributedTest if not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: @@ -21,8 +21,8 @@ def check_equal(first, second, atol=1e-2, verbose=False): - x = first.detach().numpy() - y = second.detach().numpy() + x = first.detach().float().numpy() + y = second.detach().float().numpy() print("ATOL", atol) if verbose: print("x = {}".format(x.flatten())) @@ -62,6 +62,8 @@ class TestCPUAdam(DistributedTest): set_dist_env = False @pytest.mark.skipif(not get_accelerator().is_available(), reason="only supported in CUDA 
environments.") + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="FusedAdam is not compatible") def test_fused_adam_equal(self, dtype, model_size): if ("amd" in pytest.cpu_vendor) and (dtype == torch.half): pytest.skip("cpu-adam with half precision not supported on AMD CPUs") @@ -89,6 +91,8 @@ def test_fused_adam_equal(self, dtype, model_size): def test_torch_adamw_equal(self, dtype, model_size): if get_accelerator().is_available(): + if dtype == torch.half: + pytest.skip("torch.optim.AdamW with half precision inf/nan output.") if ("amd" in pytest.cpu_vendor) and (dtype == torch.half): pytest.skip("cpu-adam with half precision not supported on AMD CPUs") ref_param_device = get_accelerator().device_name() diff --git a/tests/unit/ops/adam/test_hybrid_adam.py b/tests/unit/ops/adam/test_hybrid_adam.py index c7ef4890b322..d4559b0fa640 100644 --- a/tests/unit/ops/adam/test_hybrid_adam.py +++ b/tests/unit/ops/adam/test_hybrid_adam.py @@ -12,7 +12,7 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.adam import FusedAdam, DeepSpeedCPUAdam -from deepspeed.ops.op_builder import CPUAdamBuilder +from deepspeed.ops.op_builder import CPUAdamBuilder, FusedAdamBuilder from unit.common import DistributedTest if not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]: @@ -22,8 +22,8 @@ def check_equal(first, second, atol=1e-2, verbose=False): - x = first.detach().numpy() - y = second.detach().numpy() + x = first.detach().float().numpy() + y = second.detach().float().numpy() print("ATOL", atol) if verbose: print("x = {}".format(x.flatten())) @@ -43,6 +43,8 @@ class TestHybridAdam(DistributedTest): set_dist_env = False @pytest.mark.skipif(not get_accelerator().is_available(), reason="only supported in CUDA environments.") + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="FusedAdam is not compatible") def test_hybrid_adam_equal(self, dtype, model_size): if ("amd" in pytest.cpu_vendor) and (dtype == torch.half): pytest.skip("cpu-adam with half precision not supported on AMD CPUs") diff --git a/tests/unit/ops/aio/test_aio.py b/tests/unit/ops/aio/test_aio.py old mode 100644 new mode 100755 diff --git a/tests/unit/ops/lion/test_cpu_lion.py b/tests/unit/ops/lion/test_cpu_lion.py index 61a069af3257..e2a64cf246a0 100644 --- a/tests/unit/ops/lion/test_cpu_lion.py +++ b/tests/unit/ops/lion/test_cpu_lion.py @@ -21,8 +21,8 @@ def check_equal(first, second, atol=1e-2, verbose=False): - x = first.detach().numpy() - y = second.detach().numpy() + x = first.detach().float().numpy() + y = second.detach().float().numpy() print("ATOL", atol) if verbose: print("x = {}".format(x.flatten())) diff --git a/tests/unit/ops/transformer/inference/inference_test_utils.py b/tests/unit/ops/transformer/inference/inference_test_utils.py index 9c7b428c0e68..9af48e12f4fb 100644 --- a/tests/unit/ops/transformer/inference/inference_test_utils.py +++ b/tests/unit/ops/transformer/inference/inference_test_utils.py @@ -26,8 +26,10 @@ def get_tolerances(): def get_dtypes(): global DTYPES if DTYPES is None: - DTYPES = [torch.float16, torch.float32] + DTYPES = [torch.float32] try: + if get_accelerator().is_fp16_supported(): + DTYPES.append(torch.float16) if get_accelerator().is_bf16_supported(): DTYPES.append(torch.bfloat16) except (AssertionError, AttributeError): diff --git a/tests/unit/ops/transformer/inference/test_bias_add.py b/tests/unit/ops/transformer/inference/test_bias_add.py index 843c9b889c2b..f25bbc1be692 100644 --- 
a/tests/unit/ops/transformer/inference/test_bias_add.py +++ b/tests/unit/ops/transformer/inference/test_bias_add.py @@ -8,12 +8,13 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_add import BiasAddOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -22,15 +23,8 @@ def run_bias_add_reference(activations, bias): def run_bias_add_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_add_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_add_bf16(activations, bias) - else: - return inference_module.bias_add_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasAddOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_bias_geglu.py b/tests/unit/ops/transformer/inference/test_bias_geglu.py index d5ab13964974..05de4fbb4cf8 100644 --- a/tests/unit/ops/transformer/inference/test_bias_geglu.py +++ b/tests/unit/ops/transformer/inference/test_bias_geglu.py @@ -8,13 +8,13 @@ import deepspeed from deepspeed.ops.op_builder import InferenceBuilder from deepspeed.accelerator import get_accelerator +from deepspeed.ops.transformer.inference.op_binding.gated_activation import GatedActivationOp from deepspeed.utils.types import ActivationFuncType from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -27,10 +27,7 @@ def run_bias_geglu_reference(activations, bias): def run_bias_geglu_ds(activation, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_GELU) + return GatedActivationOp()(activation, bias, ActivationFuncType.GATED_GELU) @pytest.mark.inference_ops @@ -56,17 +53,14 @@ def run_gated_silu_reference(activations, bias): def run_gated_silu_ds(activation, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_SILU) + return GatedActivationOp()(activation, bias, ActivationFuncType.GATED_SILU) @pytest.mark.inference_ops @pytest.mark.parametrize("batch", [1, 2]) @pytest.mark.parametrize("sequence", [1, 128, 255]) @pytest.mark.parametrize("channels", [512, 1232, 4096]) -@pytest.mark.parametrize("dtype", [torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", get_dtypes()) def test_gated_silu(batch, sequence, channels, dtype): activation = torch.randn((batch, sequence, channels * 2), dtype=dtype, device=get_accelerator().device_name()) bias = torch.randn((channels * 2), dtype=dtype, device=get_accelerator().device_name()) diff --git a/tests/unit/ops/transformer/inference/test_bias_gelu.py 
b/tests/unit/ops/transformer/inference/test_bias_gelu.py index fd82da51380c..b69030e87ace 100644 --- a/tests/unit/ops/transformer/inference/test_bias_gelu.py +++ b/tests/unit/ops/transformer/inference/test_bias_gelu.py @@ -8,13 +8,14 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp from .inference_test_utils import allclose, get_dtypes from packaging import version as pkg_version if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -25,15 +26,8 @@ def run_bias_gelu_reference(activations, bias): def run_bias_gelu_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_gelu_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_gelu_bf16(activations, bias) - else: - return inference_module.bias_gelu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasGeluOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_bias_relu.py b/tests/unit/ops/transformer/inference/test_bias_relu.py index 881af78e92cf..57134665b241 100644 --- a/tests/unit/ops/transformer/inference/test_bias_relu.py +++ b/tests/unit/ops/transformer/inference/test_bias_relu.py @@ -8,12 +8,13 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_relu import BiasReluOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -23,15 +24,8 @@ def run_bias_relu_reference(activations, bias): def run_bias_relu_ds(activations, bias): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_relu_fp16(activations, bias) - elif activations.dtype == torch.bfloat16: - return inference_module.bias_relu_bf16(activations, bias) - else: - return inference_module.bias_relu_fp32(activations, bias) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasReluOp(config)(activations, bias) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_gelu.py b/tests/unit/ops/transformer/inference/test_gelu.py index de924848bfb4..beb74d09ab30 100644 --- a/tests/unit/ops/transformer/inference/test_gelu.py +++ b/tests/unit/ops/transformer/inference/test_gelu.py @@ -7,11 +7,12 @@ import torch import deepspeed from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) 
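The common thread in the `tests/unit/ops/transformer/inference` changes is replacing hand-rolled `InferenceBuilder().load()` usage, which had to pick `*_fp16`/`*_bf16`/`*_fp32` kernels by hand, with op-binding classes that dispatch on the dtype carried by `DeepSpeedInferenceConfig`. A condensed sketch of the new call shape as these tests use it (shown for the bias-GELU case; the other ops follow the same pattern):

```python
import torch

from deepspeed.ops.transformer import DeepSpeedInferenceConfig
from deepspeed.ops.transformer.inference.op_binding.bias_gelu import BiasGeluOp


def run_bias_gelu_ds(activations: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # The op object is built from a dtype-carrying config and then called like a function,
    # so the test needs no per-dtype branching or cached global module.
    config = DeepSpeedInferenceConfig(dtype=activations.dtype)
    return BiasGeluOp(config)(activations, bias)
```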
-inference_module = None torch_minor_version = None @@ -42,15 +43,11 @@ def run_gelu_ds(activations, use_triton_ops=False): from deepspeed.ops.transformer.inference.triton import gelu return gelu(activations) + device = deepspeed.accelerator.get_accelerator().device_name() channels = activations.shape[-1] - bias = torch.zeros((channels), dtype=activations.dtype, device='cuda') - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - if activations.dtype == torch.float16: - return inference_module.bias_gelu_fp16(activations, bias) - else: - return inference_module.bias_gelu_fp32(activations, bias) + bias = torch.zeros((channels), dtype=activations.dtype, device=device) + config = DeepSpeedInferenceConfig(dtype=activations.dtype) + return BiasGeluOp(config)(activations, bias) @pytest.mark.inference_ops @@ -60,7 +57,8 @@ def run_gelu_ds(activations, use_triton_ops=False): @pytest.mark.parametrize("dtype", [torch.float16]) @pytest.mark.parametrize("use_triton_ops", [True, False]) def test_gelu(batch, sequence, channels, dtype, use_triton_ops): - activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device='cuda') + device = deepspeed.accelerator.get_accelerator().device_name() + activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=device) activations_ref = activations_ds.clone().detach() if not deepspeed.HAS_TRITON and use_triton_ops: diff --git a/tests/unit/ops/transformer/inference/test_layer_norm.py b/tests/unit/ops/transformer/inference/test_layer_norm.py index 711a35213015..9c2a25b3bbc0 100644 --- a/tests/unit/ops/transformer/inference/test_layer_norm.py +++ b/tests/unit/ops/transformer/inference/test_layer_norm.py @@ -8,6 +8,7 @@ import pytest from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.layer_norm import LayerNormOp from .inference_test_utils import allclose, get_dtypes, assert_almost_equal try: import triton # noqa: F401 # type: ignore @@ -21,8 +22,6 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def ref_implementation(vals, gamma, beta, epsilon, channels, dtype): vals_f = vals.to(torch.float32) @@ -32,10 +31,7 @@ def ref_implementation(vals, gamma, beta, epsilon, channels, dtype): def ds_implementation(vals, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.layer_norm(vals, gamma, beta, epsilon) + return LayerNormOp()(vals, gamma, beta, epsilon) def ds_triton_implementation(vals, gamma, beta, epsilon): @@ -83,10 +79,7 @@ def residual_ref_implementation(vals, bias, res, gamma, beta, epsilon, channels, def residual_ds_implementation(vals, bias, res, gamma, beta, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module._layer_norm_residual(vals, bias, res, gamma, beta, epsilon) + return LayerNormOp.layer_norm_residual(vals, bias, res, gamma, beta, epsilon) def residual_ds_triton_implementation(vals, bias, res, gamma, beta, epsilon): @@ -137,10 +130,7 @@ def residual_store_ref_implementation(vals, bias, res, gamma, beta, epsilon, cha def residual_store_ds_implementation(vals, bias, res, gamma, beta, epsilon): - global inference_module - if inference_module is None: - 
inference_module = InferenceBuilder().load() - return inference_module.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon) + return LayerNormOp.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon) @pytest.mark.inference_ops @@ -175,6 +165,7 @@ def test_layer_norm_residual_store_pre_ln_res(batch, seq_len, channels, dtype): def test_triton_layer_norm(M, N, dtype, residual, input_bias, eps=1e-5, device='cuda'): if not deepspeed.HAS_TRITON: pytest.skip("triton has to be installed for the test") + torch.manual_seed(0) # create data x_shape = (M, N) diff --git a/tests/unit/ops/transformer/inference/test_moe_res_matmult.py b/tests/unit/ops/transformer/inference/test_moe_res_matmult.py index e1c8127a83ac..dcf9f16baaf1 100644 --- a/tests/unit/ops/transformer/inference/test_moe_res_matmult.py +++ b/tests/unit/ops/transformer/inference/test_moe_res_matmult.py @@ -8,24 +8,20 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer.inference.op_binding.moe_res_matmul import MoEResMatmulOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def run_moe_res_matmul_reference(residual, coef1, coef2, output): return residual * coef1 + output * coef2 def run_moe_res_matmul_ds(residual, coef, output): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() coef_t = coef.transpose(-1, -2).contiguous() - return inference_module.moe_res_matmul(residual, coef_t, output) + return MoEResMatmulOp()(residual, coef_t, output) @pytest.mark.inference_ops diff --git a/tests/unit/ops/transformer/inference/test_residual_add.py b/tests/unit/ops/transformer/inference/test_residual_add.py index c2952f74ff2d..807da4904341 100644 --- a/tests/unit/ops/transformer/inference/test_residual_add.py +++ b/tests/unit/ops/transformer/inference/test_residual_add.py @@ -8,6 +8,8 @@ import deepspeed from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder +from deepspeed.ops.transformer import DeepSpeedInferenceConfig +from deepspeed.ops.transformer.inference.op_binding import ResidualAddOp from .inference_test_utils import get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: @@ -36,11 +38,6 @@ def allclose(x, y): return torch.allclose(x, y, rtol=rtol, atol=atol) -@pytest.fixture(scope="module") -def inference_module(): - return InferenceBuilder().load() - - def res_add_bias_ref(hidden_state, residual, attn_output, attn_bias, final_bias, mp_size=1, pre_attn_norm=True): if pre_attn_norm: hidden_state += (residual + final_bias + attn_output + attn_bias) / mp_size @@ -75,9 +72,9 @@ def run_residual_add_reference(hidden_state, residual, attn_output, attn_bias, f @pytest.mark.parametrize("mp_size", [1, 2]) @pytest.mark.parametrize("pre_attn_norm", [True, False]) @pytest.mark.parametrize("use_triton_ops", [True, False]) -def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size, - pre_attn_norm, use_triton_ops): - if not deepspeed.HAS_TRITON and use_triton_ops and dtype == torch.float16: +def test_residual_add(batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size, pre_attn_norm, + use_triton_ops): + if not deepspeed.HAS_TRITON and 
use_triton_ops: pytest.skip("triton has to be installed for the test") ds_out = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name()) residual = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name()) @@ -96,19 +93,9 @@ def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_ if use_triton_ops: from deepspeed.ops.transformer.inference.triton import residual_add_bias ds_out = residual_add_bias(*res_add_args) - if dtype == torch.float16: - ds_out = inference_module.residual_add_bias_fp16(*res_add_args) - elif dtype == torch.float32: - ds_out = inference_module.residual_add_bias_fp32(*res_add_args) - elif dtype == torch.bfloat16: - ds_out = inference_module.residual_add_bias_bf16(*res_add_args) else: - if dtype == torch.float16: - ds_out = inference_module.residual_add_bias_fp16(*res_add_args) - elif dtype == torch.float32: - ds_out = inference_module.residual_add_bias_fp32(*res_add_args) - else: - raise ValueError(f"Unsupported dtype: {dtype}") + config = DeepSpeedInferenceConfig(dtype=dtype) + ds_out = ResidualAddOp(config).residual_add_func(*res_add_args) if not allclose(ds_out, ref_out): print((ds_out - ref_out).abs().max()) diff --git a/tests/unit/ops/transformer/inference/test_rms_norm.py b/tests/unit/ops/transformer/inference/test_rms_norm.py index 508a40e12e8d..ed500ec16f9f 100644 --- a/tests/unit/ops/transformer/inference/test_rms_norm.py +++ b/tests/unit/ops/transformer/inference/test_rms_norm.py @@ -8,13 +8,13 @@ import pytest from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import InferenceBuilder # type: ignore +from deepspeed.ops.transformer.inference.op_binding.pre_rms_norm import PreRMSNormOp +from deepspeed.ops.transformer.inference.op_binding.rms_norm import RMSNormOp from .inference_test_utils import allclose, get_dtypes if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None - def ref_implementation(vals, gamma, epsilon): variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True) @@ -27,10 +27,7 @@ def ref_implementation(vals, gamma, epsilon): def ds_implementation(vals, gamma, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.rms_norm(vals, gamma, epsilon) + return RMSNormOp()(vals, gamma, epsilon) @pytest.mark.inference_ops @@ -51,10 +48,7 @@ def test_rms_norm(batch, seq_len, channels, dtype): def pre_ds_implementation(vals, residual, gamma, epsilon): - global inference_module - if inference_module is None: - inference_module = InferenceBuilder().load() - return inference_module.pre_rms_norm(vals, residual, gamma, epsilon) + return PreRMSNormOp()(vals, residual, gamma, epsilon) def pre_ref_implementation(vals, residual, gamma, epsilon): @@ -74,7 +68,7 @@ def pre_ref_implementation(vals, residual, gamma, epsilon): @pytest.mark.parametrize("batch", [1, 32]) @pytest.mark.parametrize("seq_len", [1, 128]) @pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432]) -@pytest.mark.parametrize("dtype", [torch.float16, torch.float32]) +@pytest.mark.parametrize("dtype", get_dtypes()) def test_pre_norm(batch, seq_len, channels, dtype): device = get_accelerator().current_device_name() vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=device) diff --git 
a/tests/unit/ops/transformer/inference/test_softmax.py b/tests/unit/ops/transformer/inference/test_softmax.py index 76046f31e01a..4f6d69160aa7 100644 --- a/tests/unit/ops/transformer/inference/test_softmax.py +++ b/tests/unit/ops/transformer/inference/test_softmax.py @@ -11,7 +11,6 @@ if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]: pytest.skip("Inference ops are not available on this system", allow_module_level=True) -inference_module = None torch_minor_version = None @@ -43,7 +42,9 @@ def run_softmax_ds(input, use_triton_ops=False): def test_softmax(batch, sequence, channels, dtype, use_triton_ops): if not deepspeed.HAS_TRITON and use_triton_ops: pytest.skip("triton has to be installed for the test") - input_ds = torch.randn((batch, sequence, channels), dtype=dtype, device='cuda') + + device = deepspeed.accelerator.get_accelerator().device_name() + input_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=device) input_ref = input_ds.clone().detach() ds_out = run_softmax_ds(input_ds, use_triton_ops) diff --git a/tests/unit/pipe/test_pipe_module.py b/tests/unit/pipe/test_pipe_module.py index 05c6a82ef55a..9deb5ff9efd7 100644 --- a/tests/unit/pipe/test_pipe_module.py +++ b/tests/unit/pipe/test_pipe_module.py @@ -71,7 +71,6 @@ def test(self, sequential_model, simple_config, batch_input, activation_checkpoi pipe_model = copy.deepcopy(sequential_model) pipe_model = PipelineModule(layers=pipe_model, num_stages=2) - # Ensure all parameters are accounted for. my_params = sum(p.numel() for p in pipe_model.parameters()) total_pipe_params = torch.LongTensor([my_params]).to(get_accelerator().device_name()) diff --git a/tests/unit/profiling/flops_profiler/test_flops_profiler.py b/tests/unit/profiling/flops_profiler/test_flops_profiler.py index bbcb01b489f4..7f21982760ff 100644 --- a/tests/unit/profiling/flops_profiler/test_flops_profiler.py +++ b/tests/unit/profiling/flops_profiler/test_flops_profiler.py @@ -82,6 +82,8 @@ def test(self): "top_modules": 3, }, } + dtype = torch.half + hidden_dim = 10 model = SimpleModel(hidden_dim, empty_grad=False) @@ -91,7 +93,7 @@ def test(self): total_samples=50, hidden_dim=hidden_dim, device=model.device, - dtype=torch.half) + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) diff --git a/tests/unit/runtime/comm/test_coalesced_collectives.py b/tests/unit/runtime/comm/test_coalesced_collectives.py index d9ac79619bd3..6c677cf46482 100644 --- a/tests/unit/runtime/comm/test_coalesced_collectives.py +++ b/tests/unit/runtime/comm/test_coalesced_collectives.py @@ -18,7 +18,8 @@ class TestReduceScatterCoalesced(DistributedTest): world_size = 2 def test_single_input(self): - input = torch.full((6, ), dist.get_rank(), dtype=torch.half, device=get_accelerator().current_device_name()) + dtype = torch.half + input = torch.full((6, ), dist.get_rank(), dtype=dtype, device=get_accelerator().current_device_name()) (output, ) = reduce_scatter_coalesced([input], dist.get_world_group()) @@ -26,7 +27,8 @@ def test_single_input(self): assert torch.allclose(output, torch.full_like(output, 0.5)) def test_two_inputs(self): - tensor_kwargs = {"device": get_accelerator().current_device_name(), "dtype": torch.half} + dtype = torch.half + tensor_kwargs = {"device": get_accelerator().current_device_name(), "dtype": dtype} inputs = [ dist.get_rank() * torch.arange(0, 6, **tensor_kwargs), dist.get_rank() * torch.arange(6, 9, **tensor_kwargs), @@ -50,7 +52,8 @@ class 
TestReduceScatterCoalescedTensorSmallerThanWorldSize(DistributedTest): world_size = 2 def test(self): - input = torch.zeros((1, ), dtype=torch.half, device=get_accelerator().current_device_name()) + dtype = torch.half + input = torch.zeros((1, ), dtype=dtype, device=get_accelerator().current_device_name()) (output, ) = reduce_scatter_coalesced([input], dist.get_world_group()) diff --git a/tests/unit/runtime/compile/test_compile_wrapper.py b/tests/unit/runtime/compile/test_compile_wrapper.py deleted file mode 100644 index 98a7c28c6a28..000000000000 --- a/tests/unit/runtime/compile/test_compile_wrapper.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -import deepspeed -from deepspeed.accelerator import get_accelerator -from deepspeed.runtime.utils import required_torch_version - -from unit.common import DistributedTest - -pytestmark = pytest.mark.skipif(not required_torch_version(min_version=2.1), - reason="Compile tests requires Pytorch version 2.1 or above") - - -@pytest.fixture -def base_config(): - config_dict = { - "train_micro_batch_size_per_gpu": 1, - "optimizer": { - "type": "Adam", - "params": { - "lr": 0.00015 - } - }, - "fp16": { - "enabled": True - }, - "compile": { - "enabled": True, - "backend": "inductor" - } - } - return config_dict - - -class SmallModelWithCustomMethod(torch.nn.Module): - - def __init__(self, hidden_dim, test_value): - super(SmallModelWithCustomMethod, self).__init__() - self.fc = torch.nn.Linear(hidden_dim, hidden_dim, bias=False) - self.v = test_value - - def forward(self, x): - return self.fc(x) - - # Custom function that is not part of DeepSpeed engine. - def get_v(self): - return self.v - - -class TestCustomMethod(DistributedTest): - world_size = 1 - non_daemonic_procs = True - - def _init_engine(self, config, test_value): - hidden_dim = 10 - model = SmallModelWithCustomMethod(hidden_dim, test_value) - engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters()) - return engine - - def _run_model(self, engine): - train_batch_size = 1 - device = torch.device(get_accelerator().current_device_name()) - dtype = engine.module.fc.weight.dtype - hidden_dim = engine.module.fc.weight.shape[1] - x = torch.rand(train_batch_size, hidden_dim, device=device, dtype=dtype) - engine(x) - - @pytest.mark.skipif(not deepspeed.is_compile_supported(), reason="torch.compile is not supported") - def test_custom_function(self, base_config): - test_value = 10 - - engine = self._init_engine(base_config, test_value) - assert engine.module.get_v() == test_value - self._run_model(engine) - - # The model is compiled after the first run. - # Thus we make sure the custom method is still available after compilation. 
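# Illustrative sketch (not diff content) of the compile flow the surviving tests
# exercise after this cleanup: the "compile" section is dropped from the DS
# config, and callers opt in explicitly via engine.compile(), as util.py now does
# with target_engine.compile(). The config values and SimpleModel usage are
# assumptions for illustration and need a distributed/test environment to run.
import deepspeed
from unit.simple_model import SimpleModel

config = {
    "train_micro_batch_size_per_gpu": 1,
    "optimizer": {"type": "Adam", "params": {"lr": 0.00015}},
    "fp16": {"enabled": True},
}
model = SimpleModel(hidden_dim=10)
engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters())
if deepspeed.is_compile_supported():
    engine.compile()  # explicit opt-in; no "compile" block in the config above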
- assert engine.module.get_v() == test_value diff --git a/tests/unit/runtime/compile/test_compile_zero.py b/tests/unit/runtime/compile/test_compile_zero.py index 910f32db1c96..03c03361f511 100644 --- a/tests/unit/runtime/compile/test_compile_zero.py +++ b/tests/unit/runtime/compile/test_compile_zero.py @@ -45,10 +45,6 @@ def test_compile_zero(self, tmpdir, zero_stage, dtype, offload_device): }, "zero_optimization": { "stage": zero_stage, - }, - "compile": { - "enabled": True, - "backend": "inductor" } } diff --git a/tests/unit/runtime/compile/test_load_config.py b/tests/unit/runtime/compile/test_load_config.py deleted file mode 100644 index 5f1c01b86852..000000000000 --- a/tests/unit/runtime/compile/test_load_config.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from unit.simple_model import SimpleModel -import deepspeed -from deepspeed.accelerator import get_accelerator -from deepspeed.runtime.utils import required_torch_version - -from unit.common import DistributedTest - -pytestmark = pytest.mark.skipif(not required_torch_version(min_version=2.1), - reason="Compile tests requires Pytorch version 2.1 or above") - -custom_backend_called = False -custom_compler_fn_called = False - -if deepspeed.is_compile_supported(): - # PyTorch v1 does not have torch.fx - def custom_backend(gm: torch.fx.GraphModule, example_inputs): - global custom_backend_called - custom_backend_called = True - return gm.forward - - def custom_compiler_fn(module: torch.nn.Module): - global custom_compler_fn_called - custom_compler_fn_called = True - return torch.compile(module) - - -@pytest.fixture -def base_config(): - config_dict = { - "train_micro_batch_size_per_gpu": 1, - "optimizer": { - "type": "Adam", - "params": { - "lr": 0.00015 - } - }, - "fp16": { - "enabled": True - }, - "compile": { - "enabled": True, - "backend": "inductor" - } - } - return config_dict - - -class TestConfigLoad(DistributedTest): - world_size = 1 - non_daemonic_procs = True - - def _init_engine(self, config): - hidden_dim = 10 - model = SimpleModel(hidden_dim) - engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters()) - return engine - - def _run_model(self, engine): - train_batch_size = 1 - device = torch.device(get_accelerator().current_device_name()) - dtype = engine.module.linears[0].weight.dtype - hidden_dim = engine.module.linears[0].weight.shape[1] - x = torch.rand(train_batch_size, hidden_dim, device=device, dtype=dtype) - y = torch.randn_like(x) - engine(x, y) - - @pytest.mark.skipif(not deepspeed.is_compile_supported(), reason="torch.compile is not supported") - def test_compile(self, base_config): - engine = self._init_engine(base_config) - self._run_model(engine) - assert engine.is_compiled - - @pytest.mark.skipif(not deepspeed.is_compile_supported(), reason="torch.compile is not supported") - def test_custom_backend(self, base_config): - global custom_backend_called - custom_backend_called = False - - engine = self._init_engine(base_config) - engine.set_backend(f"{__name__}.custom_backend") - self._run_model(engine) - assert custom_backend_called - - def test_compile_disabled(self, base_config): - base_config["compile"]["enabled"] = False - engine = self._init_engine(base_config) - self._run_model(engine) - - @pytest.mark.skipif(not deepspeed.is_compile_supported(), reason="torch.compile is not supported") - def test_compile_kwargs(self, base_config): - 
base_config["compile"]["kwargs"] = {"mode": "default"} - engine = self._init_engine(base_config) - self._run_model(engine) - assert "mode" in engine.torch_compile_kwargs - - @pytest.mark.skipif(not deepspeed.is_compile_supported(), reason="torch.compile is not supported") - def test_set_compile_kwargs(self, base_config): - engine = self._init_engine(base_config) - engine.set_torch_compile_kwargs({"mode": "default"}) - self._run_model(engine) - assert "mode" in engine.torch_compile_kwargs - - @pytest.mark.skipif(not deepspeed.is_compile_supported(), reason="torch.compile is not supported") - def test_set_compiler_fn(self, base_config): - global custom_compler_fn_called - custom_compler_fn_called = False - - engine = self._init_engine(base_config) - engine.set_compiler_fn(custom_compiler_fn) - self._run_model(engine) - assert custom_compler_fn_called diff --git a/tests/unit/runtime/compile/util.py b/tests/unit/runtime/compile/util.py index 86eadf3f6976..d53886a81429 100644 --- a/tests/unit/runtime/compile/util.py +++ b/tests/unit/runtime/compile/util.py @@ -84,7 +84,6 @@ def compare_loss(self, config, dtype): baseline_config = deepcopy(config) baseline_config["zero_optimization"]["stage"] = 0 baseline_config["zero_optimization"]["offload_optimizer"] = {} - baseline_config["compile"]["enabled"] = False baseline_engine, baseline_optimizer, _, _ = deepspeed.initialize(config=baseline_config, model=baseline_model, model_parameters=baseline_model.parameters()) @@ -101,6 +100,7 @@ def compare_loss(self, config, dtype): target_engine, target_optimizer, _, _ = deepspeed.initialize(config=config, model=target_model, model_parameters=target_model.parameters()) + target_engine.compile() train_batch_size = config["train_micro_batch_size_per_gpu"] diff --git a/tests/unit/runtime/half_precision/onebit/test_onebit.py b/tests/unit/runtime/half_precision/onebit/test_onebit.py index ba795a853be0..14feb54550f0 100644 --- a/tests/unit/runtime/half_precision/onebit/test_onebit.py +++ b/tests/unit/runtime/half_precision/onebit/test_onebit.py @@ -33,6 +33,9 @@ pytest.skip("NCCL-based 1-bit compression is not yet supported w. 
ROCm 5 until cupy supports ROCm 5", allow_module_level=True) +if get_accelerator().device_name() == 'hpu': + pytest.skip("1-bit compression is not supported by HPU.", allow_module_level=True) + @pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"]) class TestOneBitAdamBasic(DistributedTest): diff --git a/tests/unit/runtime/half_precision/test_bf16.py b/tests/unit/runtime/half_precision/test_bf16.py index 3f551fb0fd4a..fd0b4cb0dcaa 100644 --- a/tests/unit/runtime/half_precision/test_bf16.py +++ b/tests/unit/runtime/half_precision/test_bf16.py @@ -10,8 +10,9 @@ from unit.common import DistributedTest from deepspeed.ops.op_builder import CPUAdamBuilder from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader -from unit.util import bf16_required_version_check +from unit.util import bf16_required_version_check, hpu_lazy_enabled from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator class TestAdamBF16ZeroOneCycleCompatibility(DistributedTest): @@ -196,6 +197,9 @@ def test(self, optimizer_constructor, zero_stage=2): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) client_optimizer = optimizer_constructor(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer) @@ -274,6 +278,9 @@ def test(self, stage=2): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, @@ -298,7 +305,10 @@ def test(self, comp_type, comm_type): pytest.skip( " DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" ) - + if comm_type and (comp_type not in get_accelerator().supported_dtypes() + or comm_type not in get_accelerator().supported_dtypes()): + pytest.skip( + f"comp_type:{comp_type}, comm_type:{comm_type} not supported by {get_accelerator().device_name()}.") type_str = {torch.float16: "fp16", torch.bfloat16: "bfp16"} config_dict = { @@ -321,6 +331,12 @@ def test(self, comp_type, comm_type): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + # TODO: remove this when the following is resolved: + # https://jira.habana-labs.com/browse/SW-137450 + config_dict["fp16"]["initial_scale_power"] = 30 + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, diff --git a/tests/unit/runtime/half_precision/test_dynamic_loss_scale.py b/tests/unit/runtime/half_precision/test_dynamic_loss_scale.py index 2a58fd6b4a57..ddc274e9bc18 100644 --- a/tests/unit/runtime/half_precision/test_dynamic_loss_scale.py +++ b/tests/unit/runtime/half_precision/test_dynamic_loss_scale.py @@ -6,8 +6,14 @@ import torch import deepspeed import numpy as np +import pytest from unit.common import DistributedTest from unit.simple_model import SimpleModel +from deepspeed.ops.op_builder import FusedLambBuilder +from deepspeed.accelerator import get_accelerator + +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: 
{get_accelerator().supported_dtypes()}", allow_module_level=True) def run_model_step(model, gradient_list): @@ -38,6 +44,7 @@ def test_no_overflow(self): "loss_scale_window": 2 } } + hidden_dim = 1 model = SimpleModel(hidden_dim) model, optim, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) @@ -143,6 +150,7 @@ def test_some_overflow(self): assert optim.cur_iter == expected_iteration +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") class TestUnfused(DistributedTest): world_size = 1 diff --git a/tests/unit/runtime/half_precision/test_fp16.py b/tests/unit/runtime/half_precision/test_fp16.py index 3d5e18b46502..c2fd929ff7e6 100644 --- a/tests/unit/runtime/half_precision/test_fp16.py +++ b/tests/unit/runtime/half_precision/test_fp16.py @@ -12,7 +12,9 @@ from unit.simple_model import SimpleModel, SimpleOptimizer, random_dataloader, SimpleMoEModel, sequence_dataloader from deepspeed.runtime.utils import required_torch_version from deepspeed.accelerator import get_accelerator -from deepspeed.ops.op_builder import CPUAdamBuilder +from deepspeed.ops.op_builder import CPUAdamBuilder, FusedLambBuilder, FusedAdamBuilder +from unit.util import hpu_lazy_enabled +from deepspeed.moe.utils import split_params_into_different_moe_groups_for_optimizer try: from apex import amp # noqa: F401 # type: ignore @@ -21,7 +23,11 @@ _amp_available = False amp_available = pytest.mark.skipif(not _amp_available, reason="apex/amp is not installed") +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) + +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") class TestLambFP32GradClip(DistributedTest): world_size = 2 @@ -52,6 +58,7 @@ def test(self): model.step() +@pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") class TestLambFP16(DistributedTest): world_size = 2 @@ -187,6 +194,8 @@ def mock_unscale_and_clip_grads(total_norm, apply_scale=True): engine.backward(loss) engine.step() + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="fused adam is not compatible") def test_fused_gradnorm(self, monkeypatch): if not required_torch_version(min_version=1.8): pytest.skip("DeepSpeed MoE tests need torch 1.8 or higher to run correctly") @@ -203,8 +212,10 @@ def mock_unscale_and_clip_grads(grads_groups_flat, total_norm, apply_scale=True) # initialize MoE model = SimpleMoEModel(hidden_dim, ep_size=2) + param_group = {'params': [p for p in model.parameters()], 'name': 'random-unique-name'} + params = split_params_into_different_moe_groups_for_optimizer(param_group) # optimizer = torch.optim.AdamW(params=model.parameters()) - optimizer = FusedAdam(params=model.parameters()) + optimizer = FusedAdam(params=params) engine, optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer, @@ -216,6 +227,7 @@ def mock_unscale_and_clip_grads(grads_groups_flat, total_norm, apply_scale=True) engine.backward(loss) engine.step() + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedLambBuilder.NAME], reason="lamb is not compatible") @pytest.mark.parametrize("fused_lamb_legacy", [(False), (True)]) def test_lamb_gradnorm(self, monkeypatch, fused_lamb_legacy: bool): if not required_torch_version(min_version=1.8): @@ 
-561,8 +573,10 @@ def test(self, zero_stage, optimizer_constructor): } } hidden_dim = 10 - model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) client_optimizer = optimizer_constructor(params=model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=client_optimizer) @@ -690,6 +704,9 @@ def test(self, stage): hidden_dim = 10 model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, optimizer=optimizer) data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) diff --git a/tests/unit/runtime/pipe/test_pipe.py b/tests/unit/runtime/pipe/test_pipe.py index 88e26290b650..eee8cf90fb8c 100644 --- a/tests/unit/runtime/pipe/test_pipe.py +++ b/tests/unit/runtime/pipe/test_pipe.py @@ -6,7 +6,6 @@ import copy import torch.nn as nn import pytest - import deepspeed.comm as dist from deepspeed.runtime.pipe.topology import PipeDataParallelTopology from deepspeed.runtime.pipe.module import PipelineModule @@ -67,7 +66,6 @@ def test_pipe_base(self, topo_config): skip_on_arch(min_arch=7) topo = PipeTopo(**topo_config) steps = 100 # must be >=100 - # Allocate model for consistent initial weights. init_net = AlexNetPipe() diff --git a/tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py b/tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py index 92da2257bdb0..89977bbf3668 100644 --- a/tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py +++ b/tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py @@ -5,9 +5,14 @@ import torch import deepspeed +import pytest from unit.common import DistributedTest +from deepspeed.accelerator import get_accelerator from unit.util import skip_on_arch +if get_accelerator().device_name() == 'hpu': + pytest.skip("sparse_gradients not supported by HPU.", allow_module_level=True) + class Model(torch.nn.Module): diff --git a/tests/unit/runtime/sparse_tensor/test_sparse_grads.py b/tests/unit/runtime/sparse_tensor/test_sparse_grads.py index 0689adc08670..01558cb8c6a7 100644 --- a/tests/unit/runtime/sparse_tensor/test_sparse_grads.py +++ b/tests/unit/runtime/sparse_tensor/test_sparse_grads.py @@ -5,10 +5,14 @@ import torch import deepspeed +import pytest from unit.common import DistributedTest - +from deepspeed.accelerator import get_accelerator import deepspeed.utils.groups as groups +if get_accelerator().device_name() == 'hpu': + pytest.skip("sparse_gradients not supported by HPU.", allow_module_level=True) + class Model(torch.nn.Module): @@ -43,7 +47,6 @@ class TestSparseAdam(DistributedTest): def test(self): config_dict = {"train_batch_size": 2, "steps_per_print": 1, "sparse_gradients": True} - model = Model() optimizer = Adam(list(model.linear.parameters()), list(model.emb.parameters())) engine, _, _, _ = deepspeed.initialize(model=model, optimizer=optimizer, config=config_dict) diff --git a/tests/unit/runtime/test_autocast.py b/tests/unit/runtime/test_autocast.py index 9176770afda7..9692f105d69d 100644 --- a/tests/unit/runtime/test_autocast.py +++ b/tests/unit/runtime/test_autocast.py @@ -25,6 +25,7 @@ def test_missing_amp_autocast(self, half_op): output = ds_linear(input) assert output.dtype == ds_linear.weight.dtype + @pytest.mark.skipif(get_accelerator().amp() is None, reason='amp is not 
installed') def test_disable_autocast_linear(self, half_op): amp = get_accelerator().amp() diff --git a/tests/unit/runtime/test_data_efficiency.py b/tests/unit/runtime/test_data_efficiency.py index b9bd9c3aa56e..afd61b618c87 100644 --- a/tests/unit/runtime/test_data_efficiency.py +++ b/tests/unit/runtime/test_data_efficiency.py @@ -103,9 +103,10 @@ def data_post_process(data, data_sampler_state_dict): assert 'dummy_metric' in data_sampler_state_dict['current_difficulties'] return data + dtype = torch.half hidden_dim = 10 model = SimpleModel(hidden_dim) - dataset = random_dataset(20, hidden_dim, torch.device('cpu'), dtype=torch.half) + dataset = random_dataset(20, hidden_dim, torch.device('cpu'), dtype=dtype) model, _, data_loader, _ = deepspeed.initialize(config=config_dict, model=model, training_data=dataset, @@ -159,9 +160,15 @@ def test_fixed_discrete(self): hidden_dim = 10 ground_truths = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3, 6: 3, 7: 4, 8: 4} + dtype = torch.half + model = Curriculum_SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=20, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss, seqlen = model(batch[0], batch[1]) model.backward(loss) @@ -202,10 +209,15 @@ def test_fixed_linear(self): } hidden_dim = 10 ground_truths = {1: 2, 2: 4, 3: 4, 4: 6, 5: 6, 6: 8, 7: 8, 8: 10, 9: 10, 10: 10} + dtype = torch.half model = Curriculum_SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=20, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=20, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss, seqlen = model(batch[0], batch[1]) model.backward(loss) diff --git a/tests/unit/runtime/test_ds_config_dict.py b/tests/unit/runtime/test_ds_config_dict.py index 880282bb7e57..fe525fd0b495 100644 --- a/tests/unit/runtime/test_ds_config_dict.py +++ b/tests/unit/runtime/test_ds_config_dict.py @@ -9,6 +9,7 @@ import json import hjson import argparse +import torch from deepspeed.runtime.zero.config import DeepSpeedZeroConfig from deepspeed.accelerator import get_accelerator @@ -163,11 +164,15 @@ class TestConfigLoad(DistributedTest): world_size = 1 def test_dict(self, base_config): + dtype = torch.half + hidden_dim = 10 model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model, model_parameters=model.parameters()) def test_json(self, base_config, tmpdir): + dtype = torch.half + config_path = os.path.join(tmpdir, "config.json") with open(config_path, 'w') as fp: json.dump(base_config, fp) @@ -176,6 +181,8 @@ def test_json(self, base_config, tmpdir): model, _, _, _ = deepspeed.initialize(config=config_path, model=model, model_parameters=model.parameters()) def test_hjson(self, base_config, tmpdir): + dtype = torch.half + config_path = os.path.join(tmpdir, "config.json") with open(config_path, 'w') as fp: hjson.dump(base_config, fp) @@ -188,6 +195,8 @@ class TestDeprecatedDeepScaleConfig(DistributedTest): world_size = 1 def test(self, base_config, tmpdir): + + dtype = torch.half config_path = 
create_config_from_dict(tmpdir, base_config) parser = argparse.ArgumentParser() args = parser.parse_args(args='') @@ -198,7 +207,11 @@ def test(self, base_config, tmpdir): model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(args=args, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=5, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) @@ -210,13 +223,18 @@ class TestDistInit(DistributedTest): def test(self, base_config): hidden_dim = 10 + dtype = torch.half model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model, model_parameters=model.parameters(), dist_init_required=True) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=5, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) model.backward(loss) @@ -229,11 +247,15 @@ class TestInitNoOptimizer(DistributedTest): def test(self, base_config): del base_config["optimizer"] hidden_dim = 10 - + dtype = torch.half model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=base_config, model=model) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=5, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) with pytest.raises(AssertionError): @@ -246,16 +268,20 @@ class TestArgs(DistributedTest): world_size = 1 def test_none_args(self, base_config): + dtype = torch.half + model = SimpleModel(hidden_dim=10) model, _, _, _ = deepspeed.initialize(args=None, model=model, config=base_config) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device) + data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device, dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) def test_no_args(self, base_config): + dtype = torch.half + model = SimpleModel(hidden_dim=10) model, _, _, _ = deepspeed.initialize(model=model, config=base_config) - data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device) + data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device, dtype=dtype) for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -265,6 +291,7 @@ class TestNoModel(DistributedTest): def test(self, base_config): model = SimpleModel(hidden_dim=10) + with pytest.raises(AssertionError): model, _, _, _ = deepspeed.initialize(model=None, config=base_config) diff --git a/tests/unit/runtime/test_ds_initialize.py b/tests/unit/runtime/test_ds_initialize.py index 8ec9f05a0a17..fc689828831c 100644 --- a/tests/unit/runtime/test_ds_initialize.py +++ b/tests/unit/runtime/test_ds_initialize.py @@ -11,13 +11,15 @@ from unit.simple_model import SimpleModel, random_dataloader from unit.common import DistributedTest -from unit.util import bf16_required_version_check, required_amp_check +from unit.util import 
bf16_required_version_check, required_amp_check, hpu_lazy_enabled import deepspeed from deepspeed.ops.adam import FusedAdam from deepspeed.runtime.lr_schedules import WARMUP_LR, WarmupLR from deepspeed.runtime.config import ADAM_OPTIMIZER from deepspeed.runtime.utils import see_memory_usage, required_torch_version +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import FusedAdamBuilder @pytest.mark.parametrize('zero_stage', [0, 3]) @@ -43,6 +45,7 @@ def test(self, zero_stage): # 20B test #hidden_dim = 16 * 1024 hidden_dim = 4 + dtype = torch.half with deepspeed.zero.Init(enabled=zero_stage == 3, config_dict_or_path=ds_config): model = SimpleModel(hidden_dim, nlayers=78) @@ -53,7 +56,7 @@ def test(self, zero_stage): total_samples=50, hidden_dim=hidden_dim, device=model.device, - dtype=torch.half) + dtype=dtype) for batch in data_loader: model(batch[0], batch[1]) see_memory_usage('post-fwds', force=True) @@ -68,6 +71,9 @@ def test(self, optimizer_type): def _optimizer_callable(params) -> Optimizer: return AdamW(params=params) + if (optimizer_type is None) and (not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME]): + pytest.skip("FusedAdam is not compatible") + hidden_dim = 10 model = SimpleModel(hidden_dim) @@ -79,13 +85,13 @@ def _optimizer_callable(params) -> Optimizer: client_optimizer = Adam(model.parameters()) else: client_optimizer = _optimizer_callable - _, ds_optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=list(model.parameters()), optimizer=client_optimizer) if client_optimizer is None: - assert isinstance(ds_optimizer, FusedAdam) + optim = FusedAdam + assert isinstance(ds_optimizer, optim) elif isinstance(client_optimizer, Optimizer): assert ds_optimizer == client_optimizer else: @@ -96,8 +102,11 @@ def _optimizer_callable(params) -> Optimizer: class TestConfigOptimizer(DistributedTest): world_size = 1 + @pytest.mark.skipif(not deepspeed.ops.__compatible_ops__[FusedAdamBuilder.NAME], + reason="FusedAdam is not compatible") def test(self, client_parameters): ds_config = {"train_batch_size": 1, "optimizer": {"type": "Adam", "params": {"lr": 0.001}}} + optimizer = FusedAdam hidden_dim = 10 model = SimpleModel(hidden_dim) @@ -108,8 +117,7 @@ def test(self, client_parameters): model_parameters = None _, ds_optimizer, _, _ = deepspeed.initialize(config=ds_config, model=model, model_parameters=model_parameters) - - assert isinstance(ds_optimizer, FusedAdam) + assert isinstance(ds_optimizer, optimizer) @pytest.mark.parametrize('optimizer_extension', ['zero1', 'zero2', 'zero3', 'amp', None]) @@ -136,6 +144,8 @@ def test(self, optimizer_extension, model_dtype, grad_accum_dtype): pytest.skip( "DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly" ) + if fp16 and torch.float16 not in get_accelerator().supported_dtypes(): + pytest.skip(f"FP16 not supported by {get_accelerator().device_name()}") if amp and not required_amp_check(): pytest.skip("Amp is not installed can't run amp check") # Config declaration @@ -222,6 +232,9 @@ def test(self, optimizer_extension, model_dtype, grad_accum_dtype): hidden_dim = 10 model = SimpleModel(hidden_dim) + # TODO: SW-145674 remove this WA when SW-145671 is resolved. 
+ if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) model_parameters = list(model.parameters()) if key in is_supported: diff --git a/tests/unit/runtime/test_multi_output_model.py b/tests/unit/runtime/test_multi_output_model.py index d9aba419b158..6c3263f7729c 100644 --- a/tests/unit/runtime/test_multi_output_model.py +++ b/tests/unit/runtime/test_multi_output_model.py @@ -5,9 +5,14 @@ import torch import deepspeed +import pytest from pytest import approx from unit.common import DistributedTest from unit.multi_output_model import MultiOutputModel, multi_output_dataloader +from deepspeed.accelerator import get_accelerator + +if torch.half not in get_accelerator().supported_dtypes(): + pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True) class TestTwoOutputModel(DistributedTest): @@ -35,7 +40,6 @@ def test(self, tmpdir): hidden_dim = 10 weight_value = 0.1 - model = MultiOutputModel(hidden_dim, weight_value) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) total_samples = 4 diff --git a/tests/unit/runtime/test_pld.py b/tests/unit/runtime/test_pld.py index 1f602db73b2f..cfe4f3e38b10 100644 --- a/tests/unit/runtime/test_pld.py +++ b/tests/unit/runtime/test_pld.py @@ -5,9 +5,9 @@ import numpy as np import deepspeed +import torch import pytest from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop - from unit.common import DistributedTest from unit.simple_model import SimpleModel, PLD_SimpleModel, random_dataloader @@ -49,11 +49,15 @@ def test_pld_model(self, theta): } } hidden_dim = 10 - + dtype = torch.half model = PLD_SimpleModel(hidden_dim, empty_grad=False) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -90,11 +94,16 @@ def test_non_pld_model(self): } } hidden_dim = 10 + dtype = torch.half model = SimpleModel(hidden_dim, empty_grad=False) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=1, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=1, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in enumerate(data_loader): with pytest.raises(TypeError): diff --git a/tests/unit/runtime/zero/test_ignore_unused_parameters.py b/tests/unit/runtime/zero/test_ignore_unused_parameters.py index aade488fde42..baee69a4e3fa 100644 --- a/tests/unit/runtime/zero/test_ignore_unused_parameters.py +++ b/tests/unit/runtime/zero/test_ignore_unused_parameters.py @@ -4,10 +4,10 @@ # DeepSpeed Team import pytest +import torch from unit.common import DistributedTest from unit.simple_model import UnusedParametersModel, random_dataloader from deepspeed.ops.op_builder import CPUAdamBuilder - import deepspeed @@ -42,11 +42,16 @@ def test(self, ignore_unused_parameters): } } hidden_dim = 4 + dtype = torch.half model = UnusedParametersModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = 
random_dataloader(model=model, total_samples=10, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=10, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) def _loop(): for n, batch in enumerate(data_loader): diff --git a/tests/unit/runtime/zero/test_zero.py b/tests/unit/runtime/zero/test_zero.py index 5a8af95bb0f8..ac07334f52d8 100644 --- a/tests/unit/runtime/zero/test_zero.py +++ b/tests/unit/runtime/zero/test_zero.py @@ -25,6 +25,7 @@ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint from deepspeed.runtime.zero.utils import ZeRORuntimeException from deepspeed.accelerator import get_accelerator +from unit.util import hpu_lazy_enabled def run_unbalanced_gradients(model, data_loader): @@ -77,10 +78,14 @@ def test(self, zero_stage): }, } hidden_dim = 4 - + dtype = torch.half model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) run_unbalanced_gradients(model, data_loader) @@ -117,6 +122,7 @@ def test(self, mics_enabled, zero_stage=3): }, } hidden_dim = 4 + dtype = torch.half class AlbertLikeModel(torch.nn.Module): @@ -134,7 +140,11 @@ def forward(self, x, y): model = AlbertLikeModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -195,6 +205,7 @@ def forward(self, x, y): return self.cross_entropy_loss(hidden, y) hidden_dim = 3 # do not change + dtype = torch.half world_size = dist.get_world_size() # we want at least 2x layers as there are gpus to trigger round_robin_fp16_groups reshuffle in zero2 @@ -202,10 +213,15 @@ def forward(self, x, y): model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) + # Flush zero stage 3 cache model.empty_partition_cache() - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -283,10 +299,13 @@ def forward(self, x, y): return self.cross_entropy_loss(hidden, y) hidden_dim = 3 + dtype = torch.half world_size = dist.get_world_size() n_layers = world_size * 2 model = MyModel(hidden_dim=hidden_dim, n_layers=n_layers, freeze_params=freeze_params) + if hpu_lazy_enabled(): + model.to(get_accelerator().device_name()) optim_groups = [ { @@ -308,7 +327,11 @@ def forward(self, x, y): ) model.empty_partition_cache() - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) for i, batch in 
enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -372,6 +395,7 @@ def test(self, allgather_bucket_size, zero_stage=2): }, } hidden_dim = 4 + dtype = torch.half model = SimpleModel(hidden_dim=hidden_dim) if allgather_bucket_size % 2 == 0: @@ -407,6 +431,7 @@ def test(self, zero_stage=2): }, } hidden_dim = 4 + dtype = torch.half model = SimpleModel(hidden_dim=hidden_dim) model, _, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) @@ -423,6 +448,7 @@ def test(self, zero_stage=2): def _ds_initialize_for_param_partitioning_testing(model: Module, cfg: dict) -> DeepSpeedEngine: + ds_engine, _, _, _ = deepspeed.initialize(config=cfg, model=model, model_parameters=model.parameters()) return ds_engine @@ -864,12 +890,15 @@ def forward(self, x: Tensor) -> Tensor: "loss_scale": 1.0, }, } - with deepspeed.zero.Init(mem_efficient_linear=False, enabled=init_context_manager): + dtype = torch.float16 + zero3_init_dtype = None + + with deepspeed.zero.Init(dtype=zero3_init_dtype, mem_efficient_linear=False, enabled=init_context_manager): model = LargeParamModel() ds_engine = _ds_initialize_for_param_partitioning_testing(model, ds_config) for train_iter in range(3): # test multiple iterations to cover prefetching - activation: Tensor = ds_engine(torch.ones(param_sz, dtype=torch.float16, device=ds_engine.device)) + activation: Tensor = ds_engine(torch.ones(param_sz, dtype=dtype, device=ds_engine.device)) partition_sz = math.ceil(param_sz / self.world_size) for rank_idx, start_idx in enumerate(range(0, param_sz, partition_sz)): @@ -900,7 +929,6 @@ class ManyParamModel(Module): def __init__(self) -> None: super().__init__() - self.modulelist = ModuleList( EltwiseMultiplicationModule(weight=Parameter(torch.empty((param_sz, ), dtype=torch.float32))) for _ in range(n_layers)) @@ -943,6 +971,7 @@ def forward(self, x: Tensor) -> Tensor: "loss_scale": 1.0, }, } + dtype = torch.half with deepspeed.zero.Init(config=ds_cfg, mem_efficient_linear=False, enabled=init_context_manager): model = ManyParamModel() @@ -950,12 +979,11 @@ def forward(self, x: Tensor) -> Tensor: ds_engine = _ds_initialize_for_param_partitioning_testing(model, ds_cfg) for _ in range(3): # test multiple iterations to cover prefetching - activations: List[Tensor] = ds_engine( - torch.ones((param_sz, ), dtype=torch.float16, device=ds_engine.device)) + activations: List[Tensor] = ds_engine(torch.ones((param_sz, ), dtype=dtype, device=ds_engine.device)) assert len(activations) == n_layers partition_sz = math.ceil(param_sz / self.world_size) - expected_activations = torch.empty(param_sz, dtype=torch.float16, device=ds_engine.device) + expected_activations = torch.empty(param_sz, dtype=dtype, device=ds_engine.device) for start_idx in range(0, param_sz, partition_sz): expected_activations[start_idx:start_idx + partition_sz] = dist.get_rank() @@ -983,8 +1011,8 @@ class ModelWhereParentInitializesChildWeights(Module): def __init__(self) -> None: super().__init__() - - self.linear = Linear(12, 1) + dev = get_accelerator().device_name() + self.linear = Linear(12, 1, device=dev) self.apply(self.__init_weights) @@ -1295,10 +1323,15 @@ def test(self): }, } hidden_dim = 10 + dtype = torch.half model = SimpleModel(hidden_dim) model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) - data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=50, + 
hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) dist.barrier() for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -1328,6 +1361,8 @@ def test(self, return_type): }, } hidden_dim = 10 + dtype = torch.half + zero3_init_dtype = None class MyModel(torch.nn.Module): @@ -1349,11 +1384,15 @@ def forward(self, x, y): raise NotImplementedError return val - with deepspeed.zero.Init(): + with deepspeed.zero.Init(dtype=zero3_init_dtype): model = MyModel(hidden_dim) model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) - data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) dist.barrier() for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -1397,12 +1436,17 @@ def test(self, zero_stage): }, } hidden_dim = 4 + dtype = torch.half model = SimpleModel(hidden_dim=hidden_dim, nlayers=12) model, optimizer, _, _ = deepspeed.initialize(config=config_dict, model=model, model_parameters=model.parameters()) - data_loader = random_dataloader(model=model, total_samples=16, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=16, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) assert model.global_steps == 0 @@ -1475,11 +1519,16 @@ def forward(self, x, y): val = (x, loss) return val - with deepspeed.zero.Init(config_dict_or_path=config_dict, enabled=zero_stage == 3): - model = MyModel(hidden_dim) + dtype = torch.float16 + with deepspeed.zero.Init(dtype=dtype, config_dict_or_path=config_dict, enabled=zero_stage == 3): + model = MyModel(hidden_dim) model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict) - data_loader = random_dataloader(model=model, total_samples=50, hidden_dim=hidden_dim, device=model.device) + data_loader = random_dataloader(model=model, + total_samples=50, + hidden_dim=hidden_dim, + device=model.device, + dtype=dtype) dist.barrier() for n, batch in enumerate(data_loader): loss = model(batch[0], batch[1]) @@ -1509,8 +1558,10 @@ def test(self, force_ds_optim): "zero_force_ds_cpu_optimizer": force_ds_optim, } hidden_dim = 10 - model = SimpleModel(hidden_dim) + if hpu_lazy_enabled(): + device = get_accelerator().current_device_name() + model.to(device) optimizer = torch.optim.Adam(model.parameters()) @@ -1538,6 +1589,7 @@ def test_training_partition_cache(self, training): "stage3_param_persistence_threshold": hidden_dim, }, } + dtype = torch.half if training: config_dict["optimizer"] = {"type": "Adam"} @@ -1546,7 +1598,6 @@ def test_training_partition_cache(self, training): model, _, _, _ = deepspeed.initialize(model=model, config=config_dict) - dtype = torch.half data_loader = random_dataloader( model=model, total_samples=6, @@ -1602,7 +1653,6 @@ def test_empty_param_groups(self, dtype, use_client_optimizer, empty_weight_grou "enabled": dtype == torch.bfloat16 } } - if use_client_optimizer: optimizer = torch.optim.AdamW(param_groups, lr=0.1) model_parameters = model.parameters() diff --git a/tests/unit/runtime/zero/test_zero_context.py b/tests/unit/runtime/zero/test_zero_context.py index 0ddf1026eaf8..eaf98966ed72 100644 --- a/tests/unit/runtime/zero/test_zero_context.py +++ b/tests/unit/runtime/zero/test_zero_context.py @@ -71,7 +71,8 @@ def __init__(self, hidden_dim): super(MyModel, 
self).__init__() self.l1 = torch.nn.Linear(hidden_dim, hidden_dim) - with deepspeed.zero.Init(config_dict_or_path=config_dict): + dtype = None + with deepspeed.zero.Init(config_dict_or_path=config_dict, dtype=dtype): model = MyModel(hidden_dim) with deepspeed.zero.GatheredParameters(list(model.parameters())): @@ -111,7 +112,8 @@ class TestSerialContext(DistributedTest): def test_subclass_param(self): setup_serial_env() - with deepspeed.zero.Init(config=config): + dtype = None + with deepspeed.zero.Init(config=config, dtype=dtype): model = ConvNet() assert model.param.ds_status == ZeroParamStatus.NOT_AVAILABLE @@ -238,7 +240,7 @@ def forward(self, input): return C.sum() net = ExtLinear() - + dtype = torch.float16 args = SimpleNamespace(local_rank=0) engine, optim, _, _ = deepspeed.initialize(args=args, model=net, @@ -248,7 +250,7 @@ def forward(self, input): with deepspeed.zero.GatheredParameters(net.linear1.weight): assert net.linear1.weight.numel() == net.dim**2 - input = torch.rand(net.dim).to(engine.device).half() + input = torch.rand(net.dim).to(engine.device).to(dtype) loss = engine(input) engine.backward(loss) engine.step() @@ -258,7 +260,8 @@ class TestScatterGather(DistributedTest): world_size = 2 def test(self): - with deepspeed.zero.Init(): + dtype = None + with deepspeed.zero.Init(dtype=dtype): l = torch.nn.Linear(6, 3) assert l.weight.ds_status == ZeroParamStatus.NOT_AVAILABLE assert l.weight.shape == torch.Size(partitioned_param_data_shape) @@ -277,7 +280,8 @@ class TestGatherUpdate(DistributedTest): world_size = 2 def test(self): - with deepspeed.zero.Init(): + dtype = torch.float16 + with deepspeed.zero.Init(dtype=dtype): l = torch.nn.Linear(4, 2) assert l.weight.ds_status == ZeroParamStatus.NOT_AVAILABLE diff --git a/tests/unit/runtime/zero/test_zero_context_ancestry.py b/tests/unit/runtime/zero/test_zero_context_ancestry.py index 21955f5df152..1a631bb3b805 100644 --- a/tests/unit/runtime/zero/test_zero_context_ancestry.py +++ b/tests/unit/runtime/zero/test_zero_context_ancestry.py @@ -67,7 +67,8 @@ class TestSerialParamInit(DistributedTest): def test_subclass_param_init(self): setup_serial_env() - with deepspeed.zero.Init(config=config): + dtype = None + with deepspeed.zero.Init(config=config, dtype=dtype): model = Son().cpu() # test that all params have been partitioned @@ -107,7 +108,8 @@ def __init__(self): def magic(self): return 42 - with deepspeed.zero.Init(): + dtype = torch.float16 + with deepspeed.zero.Init(dtype=dtype): model = Model() engine, *_ = deepspeed.initialize(model=model, config=ds_config, model_parameters=model.parameters()) assert engine.magic() == 42 diff --git a/tests/unit/runtime/zero/test_zero_context_return.py b/tests/unit/runtime/zero/test_zero_context_return.py index 874a8ea3b676..884dc3095e04 100644 --- a/tests/unit/runtime/zero/test_zero_context_return.py +++ b/tests/unit/runtime/zero/test_zero_context_return.py @@ -137,12 +137,12 @@ def test_ext_param_return(self): setup_serial_env() net = DanglingExt() - + dtype = torch.float16 args = SimpleNamespace(local_rank=0) engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) for _ in range(5): - input = torch.rand(net.dim).to(engine.device).half() + input = torch.rand(net.dim).to(engine.device).to(dtype) loss = engine(input) engine.backward(loss) engine.step() @@ -151,14 +151,14 @@ def test_ext_param_return(self): def test_ext_param_returnobj(self): setup_serial_env() print() - + dtype = torch.float16 net = ModelContainer(return_obj=True) 
args = SimpleNamespace(local_rank=0) engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) for _ in range(5): - input = torch.rand(net.dim).to(engine.device).half() + input = torch.rand(net.dim).to(engine.device).to(dtype) loss = engine(input) assert len(net._external_params) == 1 assert len(net.dangler._external_params) == 0 @@ -169,14 +169,14 @@ def test_ext_param_returnobj(self): def test_stage_3_output_type(self, output_type): setup_serial_env() print() - + dtype = torch.float16 net = ModelContainerVariableOutputType(output_type=output_type) args = SimpleNamespace(local_rank=0) engine, _, _, _ = deepspeed.initialize(args=args, model=net, model_parameters=net.parameters(), config=config) for _ in range(1): - input = torch.rand(net.dim).to(engine.device).half() + input = torch.rand(net.dim).to(engine.device).to(dtype) loss = engine(input) if loss is not None: if isinstance(loss, dict): diff --git a/tests/unit/runtime/zero/test_zero_dynamic_class.py b/tests/unit/runtime/zero/test_zero_dynamic_class.py index e235206d4dc4..b2cb66dfc79c 100644 --- a/tests/unit/runtime/zero/test_zero_dynamic_class.py +++ b/tests/unit/runtime/zero/test_zero_dynamic_class.py @@ -6,7 +6,6 @@ import torch from unit.common import DistributedTest - import deepspeed @@ -15,7 +14,6 @@ class TestNewClassDeclaredNestingInit(DistributedTest): def test_new_class_declared_nesting_init(self): ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3)) - with deepspeed.zero.Init(config_dict_or_path=ds_config): class MyModel(torch.nn.Module): diff --git a/tests/unit/runtime/zero/test_zero_nesting_init.py b/tests/unit/runtime/zero/test_zero_nesting_init.py index 15d82fd8be00..fc40de3da229 100644 --- a/tests/unit/runtime/zero/test_zero_nesting_init.py +++ b/tests/unit/runtime/zero/test_zero_nesting_init.py @@ -18,7 +18,6 @@ class TestNestingInit(DistributedTest): def test_nesting_init(self): ds_config = dict(train_batch_size=1, zero_optimization=dict(stage=3)) - with deepspeed.zero.Init(config_dict_or_path=ds_config): with deepspeed.zero.Init(config_dict_or_path=ds_config): model = torch.nn.Linear(4, 4) diff --git a/tests/unit/runtime/zero/test_zero_offloadpp.py b/tests/unit/runtime/zero/test_zero_offloadpp.py index 5bfec399e19f..a28f2385a0f2 100644 --- a/tests/unit/runtime/zero/test_zero_offloadpp.py +++ b/tests/unit/runtime/zero/test_zero_offloadpp.py @@ -6,11 +6,8 @@ import deepspeed.comm as dist from unit.common import DistributedTest from unit.simple_model import random_dataloader - import deepspeed - from deepspeed.runtime.zero.offload_config import DeepSpeedZeroOffloadOptimizerConfig - import torch.nn as nn diff --git a/tests/unit/runtime/zero/test_zero_tensor_fragment.py b/tests/unit/runtime/zero/test_zero_tensor_fragment.py index b3adfdf96c50..ea118bba2801 100644 --- a/tests/unit/runtime/zero/test_zero_tensor_fragment.py +++ b/tests/unit/runtime/zero/test_zero_tensor_fragment.py @@ -120,6 +120,7 @@ def test_zero_fragments(self, tmpdir, api_type, zero_stage, offload_device, froz "stage": zero_stage, } } + dtype = torch.float16 if offload_device == OffloadDeviceEnum.cpu: config_dict["zero_optimization"]["offload_optimizer"] = {"device": offload_device} @@ -131,7 +132,7 @@ def test_zero_fragments(self, tmpdir, api_type, zero_stage, offload_device, froz hidden_dim = 128 if zero_stage == 3: - with deepspeed.zero.Init(config_dict_or_path=config_dict): + with deepspeed.zero.Init(config_dict_or_path=config_dict, dtype=dtype): model = MyModel(hidden_dim, 
frozen_weights) else: model = MyModel(hidden_dim, frozen_weights) @@ -305,7 +306,6 @@ def test_zero_fragments(self, tmpdir, api_type, zero_stage, offload_device, dtyp config_dict["fp16"] = {"enabled": True, "initial_scale_power": 8} elif dtype == torch.bfloat16: config_dict["bf16"] = {"enabled": True} - hidden_dim = 128 if zero_stage == 3: config_dict["zero_optimization"]["param_persistence_threshold"] = hidden_dim diff --git a/tests/unit/runtime/zero/test_zeropp.py b/tests/unit/runtime/zero/test_zeropp.py index 7a05c2a8001b..545ed98ad2ef 100644 --- a/tests/unit/runtime/zero/test_zeropp.py +++ b/tests/unit/runtime/zero/test_zeropp.py @@ -202,7 +202,7 @@ def load_and_prepare_data(self, model_name): tokenizer.pad_token = tokenizer.eos_token # Load and tokenize dataset - dataset = load_dataset("wikitext", 'wikitext-103-raw-v1', split='train[:1%]').filter(lambda x: x["text"]) + dataset = load_dataset("wikitext", 'wikitext-103-raw-v1', split='train[:1%]') def tokenize_function(examples): # Tokenize and ensure 'labels' are the same as 'input_ids' diff --git a/tests/unit/skip_marker.py b/tests/unit/skip_marker.py new file mode 100644 index 000000000000..7425eb2a0fea --- /dev/null +++ b/tests/unit/skip_marker.py @@ -0,0 +1,68 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_lazy_skip_tests = {} + +g1_lazy_skip_tests = { + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", +} + +g2_lazy_skip_tests = { + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "Stuck, SW-190067.", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", +} + +g3_lazy_skip_tests = { + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "Skip hang download", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B]": + "Skip hang download" +} +hpu_eager_skip_tests = {} + +g1_eager_skip_tests = { + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Flaky Segfault. 
Stuck", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", +} + +g2_eager_skip_tests = { + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "stuck, SW-190067.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "Stuck, SW-190067.", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "HPU is not supported on deepspeed-mii", +} +g3_eager_skip_tests = { + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "Skip hang download", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B]": + "Skip hang download", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": "Skip synapse Device restart", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": "Skip Seg fault Internal Error", +} + +gpu_skip_tests = { + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "Disabled as it is causing test to stuck. SW-163517.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail not supported", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "skip: timeout triggered", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-EleutherAI/gpt-neo-2.7B]": + "skip: timeout triggered", +} diff --git a/tests/unit/util.py b/tests/unit/util.py index 75c3000bd4a2..9d00da36ea0e 100644 --- a/tests/unit/util.py +++ b/tests/unit/util.py @@ -5,6 +5,8 @@ import pytest import torch +import os +import multiprocessing from deepspeed.accelerator import get_accelerator, is_current_accelerator_supported from deepspeed.git_version_info import torch_info from packaging import version as pkg_version @@ -47,11 +49,14 @@ def bf16_required_version_check(accelerator_check=True): cuda_version_available = CUDA_MAJOR >= 11 nccl_version_available = NCCL_MAJOR > 2 or (NCCL_MAJOR == 2 and NCCL_MINOR >= 10) npu_available = get_accelerator().device_name() == 'npu' + hpu_available = get_accelerator().device_name() == 'hpu' if torch_version_available and cuda_version_available and nccl_version_available and accelerator_pass: return True elif npu_available: return True + elif hpu_available: + return True else: return False @@ -76,3 +81,57 @@ def required_amp_check(): return False else: return True + + +def worker(proc_id, return_dict): + #TODO SW-114787: move to new api outside experimental + import habana_frameworks.torch.utils.experimental as htexp + deviceType = htexp._get_device_type() + if deviceType == htexp.synDeviceType.synDeviceGaudi: + return_dict['devicetype'] = "Gaudi" + elif deviceType == htexp.synDeviceType.synDeviceGaudi2: + return_dict['devicetype'] = "Gaudi2" + elif deviceType == htexp.synDeviceType.synDeviceGaudi3: + return_dict['devicetype'] = "Gaudi3" + else: + return_dict['devicetype'] = None + assert False, f'Unexpected hpu device Type: {deviceType}' + + +def get_hpu_dev_version(): + hpu_dev = None + if get_accelerator().device_name() != 'hpu': + return hpu_dev + if os.getenv("DEEPSPEED_UT_HL_DEVICE", default=None): + hpu_dev = os.getenv("DEEPSPEED_UT_HL_DEVICE") + if hpu_dev not in ["Gaudi", "Gaudi2", "Gaudi3"]: + manager = multiprocessing.Manager() + 
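+        # Query the device type in a spawned child process; worker() stores the detected Gaudi generation under return_dict['devicetype'].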
return_dict = manager.dict() + proc_id = 0 + multiprocessing.set_start_method("spawn", force=True) + p = multiprocessing.Process(target=worker, args=(proc_id, return_dict)) + p.start() + p.join() + try: + dev_type = return_dict['devicetype'] + except: + assert False, 'Unexpected hpu device Type: {}'.format(return_dict['devicetype']) + p.terminate() + exit_code = p.exitcode + if exit_code: + assert False, 'HPU dev type process exit with: {}'.format(exit_code) + if dev_type in ["Gaudi", "Gaudi2", "Gaudi3"]: + hpu_dev = dev_type + os.environ['DEEPSPEED_UT_HL_DEVICE'] = dev_type + return dev_type + else: + assert False, 'Unexpected hpu device Type: {}'.format(return_dict['devicetype']) + else: + return hpu_dev + + +def hpu_lazy_enabled(): + if get_accelerator().device_name() == 'hpu': + import habana_frameworks.torch.hpu as thpu + return thpu.is_lazy() + return False diff --git a/tests/unit/utils/test_groups.py b/tests/unit/utils/test_groups.py index d8f12be4f3c6..5cd35baf3510 100644 --- a/tests/unit/utils/test_groups.py +++ b/tests/unit/utils/test_groups.py @@ -18,7 +18,7 @@ def test_get_expert_parallel_ranks(): expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] """ expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks(world_size=16, - model_parallel_size_=2, + tensor_parallel_size_=2, expert_parallel_size_=4) assert expert_parallel_groups == [ [0, 2, 4, 6], diff --git a/tests/unit/utils/test_init_on_device.py b/tests/unit/utils/test_init_on_device.py index 5d84e9be855a..d9ff7eee1d7b 100644 --- a/tests/unit/utils/test_init_on_device.py +++ b/tests/unit/utils/test_init_on_device.py @@ -20,9 +20,10 @@ def test_on_device(self, device): if device == "meta" and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"): pytest.skip("meta tensors only became stable after torch 1.10") - with OnDevice(dtype=torch.half, device=device): + dtype = torch.half + with OnDevice(dtype=dtype, device=device): model = SimpleModel(4) for p in model.parameters(): assert p.device == torch.device(device) - assert p.dtype == torch.half + assert p.dtype == dtype diff --git a/tests/unit/xfail_marker.py b/tests/unit/xfail_marker.py new file mode 100644 index 000000000000..d835f7df4cd2 --- /dev/null +++ b/tests/unit/xfail_marker.py @@ -0,0 +1,5903 @@ +# Copyright (c) 2023 Habana Labs, Ltd. an Intel Company +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +hpu_lazy_xfail_tests = {} + +g1_lazy_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "Xfail, due 
to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + 
"unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-162575.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-fp32-zero3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_model_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_cpu_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_cpu_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_quantized_linear": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "float16/half is 
not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[1-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[2-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TesthpZeroConfigSweep::test[4-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-20-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-8-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[2-20-4000]": + "float16/half is not 
supported on Gaudi.", + "unit/runtime/zero/test_hpzero.py::TestSecondaryTensorSize::test[4-8-4000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[20-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qgzero.py::TesthpZeroConfigSweep::test[8-2000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-2048]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-2048]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[2]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[4]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "float16/half is not supported on Gaudi.", + 
"unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-1]": + 
"float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTP::test[falcon]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-codegen]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe[4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe.py::TestPRMoE::test[2-True]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe.py::TestPRMoE::test[2-False]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-2-2]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-4]": + "Xfail, FP16 not supported.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-2]": + "Xfail, FP16 not supported.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to SW-156783.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to SW-143227.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-j]": + "Xfail, due to SW-162660.", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "CUDA tests not supported by HPU", + 
"unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "Xfail, due to SW-166162.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "Xfail, due to SW-166162.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail, due to SW-167459.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail, due to SW-167459.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail, due to SW-167459.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail, due to SW-167459.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "Xfail, due to SW-170288.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "Xfail, due to SW-170288.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail, due to Gaudi1 does not support FP16.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to Gaudi1 does not support FP16.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-True]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-full-dtype1]": + "float16/half is not supported on 
Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False]": + "Xfail, FP16 not supported.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True]": + "Xfail, FP16 not supported.", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-fp16]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-2-dtype1]": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "Fp16 not supported by Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Not supported on Gaudi1", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Not supported on Gaudi1", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + " Comm Init Rank Error.", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + " Comm Init Rank Error.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to 
SW-178730.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-False]": + "Fp16 not supported by Gaudi1", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]": + "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]": + "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]": + "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]": + "Xfail, due to SW-180322.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]": + "Xfail, due to SW-179861.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "Fp16 not supported by Gaudi1", + 
"unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "Xfail, due to SW-179868.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "Fp16 not supported by Gaudi1.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "Fp16 not supported by Gaudi1", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-False]": + "Fp16 not supported by Gaudi1", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-False]": + "Fp16 not supported by Gaudi1", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalescedTensorSmallerThanWorldSize::test": + "fp16 is not supported Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_single_input": + "fp16 is not supported Gaudi.", + 
"unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_two_inputs": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestDSInitWZinit::test": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_discrete": + "fp16 is not supported Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_linear": + "fp16 is not supported Gaudi.", + "unit/runtime/test_data_efficiency.py::TestDataEfficiency::test_curriculum_learning": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_hjson": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_dict": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_json": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_none_args": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_no_args": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test": + "fp16 is not supported Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[False]": + "fp16 is not supported Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[False]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model": + "fp16 is not supported Gaudi.", + 
"unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]": + "fp16 is not supported Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_ext_param_getattr": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[dict]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_ext_param_return": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[tensor]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[None]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-True]": + "fp16 is not supported Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + 
"unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "fp16 is not supported Gaudi.", + "unit/inference/test_inference_config.py::TestInferenceConfig::test_json_config": + "fp16 is not supported Gaudi.", + "unit/inference/test_inference_config.py::TestInferenceConfig::test_overlap_kwargs": + "fp16 is not supported Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "fp16 is not supported Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_autocast.py::TestAutoCastDisable::test_missing_amp_autocast[True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3InitForParentWeightInitialization::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-True]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_scatter_halftype": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-4]": + "float16/half is not 
supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestPartitionNcclAlignment::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadStage1::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1001]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[2]": + 
"float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningBase::test_fp16_enabled[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + 
"float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu:0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_no_grad_input_error": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_counter": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_rank": + "float16/half is not supported on Gaudi.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "xfail due to model download", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "xfail due to SW-184834", + 
"unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + 
"unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + 
"unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported on Gaudi", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail due to FP16 not supported on gaudi", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail due to FP16 not supported on gaudi", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True]": + "Xfail, due to SW-163097.", +} + +g2_lazy_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-noCG-noTriton]": + 
"Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-large-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[roberta-large-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp16-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/pythia-70m-deduped-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163095.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp16-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp32-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-noCG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-bf16-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_inference.py::TestModelTask::test[EleutherAI/gpt-j-6b-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_inference.py::TestModelTask::test[distilbert-base-cased-distilled-squad-question-answering-fp16-CG-noTriton]": + "Xfail, failed on vanilla as well.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": # noqa: F601 + "Xfail, due to SW-163097.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "CUDA tests not supported by HPU", + 
"unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "CUDA tests not supported by HPU", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "CUDA tests not supported by HPU", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-j]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-j]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-j]": + "Xfail, due to SW-162660.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/half_precision/test_fp16.py::TestAdamFP16ZeroOneCycleCompatibility::test[True-1]": + "Xfail, due to SW-145262.", + "unit/runtime/half_precision/test_fp16.py::TestAdamFP16ZeroOneCycleCompatibility::test[True-2]": + "Xfail, due to SW-145262.", + "unit/runtime/half_precision/test_fp16.py::TestAdamFP16ZeroOneCycleCompatibility::test[True-3]": + "Xfail, due to SW-145262.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-162575.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-162575.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_cpu_offload": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization": + 
"Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_cpu_offload": + "Xfail, due to SW-162660.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to SW-156783.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to SW-143227.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-dtype1]": + "Xfail, due to SW-145262.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-dtype1]": + "Xfail, due to SW-145262.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-dtype1]": + "Xfail, due to SW-145262.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "Xfail, due to SW-164551.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "Xfail, due to SW-164551.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[4]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_not_load_optimizer_state[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_optimizer_state[1]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_mics_optimizer.py::TestMiCSCheckpoint::test_load_module_only[2]": + "Xfail, due to SW-164577.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "Xfail, due to SW-164593.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-dtype1]": + "Xfail, due to SW-164593.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_quantized_linear": + "Xfail, due to SW-164606.", + 
"unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-1024]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[20-2048]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-2048]": + "Xfail, due to SW-156782.", + "unit/runtime/zero/test_qwzero.py::TesthpZeroConfigSweep::test[8-1024]": + "Xfail, due to SW-156782.", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[EleutherAI/gpt-j-6b-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163098.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": # noqa: F601 + "Xfail, due to SW-170181.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": # noqa: F601 + "Xfail, due to SW-168442.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "Xfail, due to SW-164593.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "Xfail, due to SW-164593.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": # noqa: F601 + "Xfail, due to SW-162575.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to SW-162575.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": # noqa: F601 + "Xfail, due to SW-164593.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": # noqa: F601 + "Xfail, due to SW-164593.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "Xfail, due to SW-168583.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail, due to SW-162660.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail, due to SW-162660.", + 
"unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "Xfail, due to SW-138014.", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]": + "Xfail, due to SW-175725.", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[False]": + "Xfail, due to SW-175725.", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[True]": + "Xfail, due to SW-175725.", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-False-resulting_optimizer0]": "Xfail due to SW-175725", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-True-resulting_optimizer12]": "Xfail due to SW-175725", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[AdamW-False-False-True-resulting_optimizer4]": "Xfail due to SW-175725", + "unit/ops/adam/test_adamw.py::TestAdamConfigs::test[Adam-False-False-False-resulting_optimizer8]": "Xfail due to SW-175725", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "skipping due to HPU is not supported FusedLion, SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "skipping due to HPU is not supported FusedLion, SW-176903", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]" : "Xfail, due to SW-178730.", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]" : "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]" : "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]" : "Xfail, due to SW-180322.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]" : "Xfail, due to SW-180322.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]" : "Xfail, due to SW-179864.", + "unit/checkpoint/test_shared_weights.py::TestCheckpointSharedWeights::test_checkpoint_shared_weights[True]" : "Xfail, due to SW-179861.", + 
"unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]" : "Xfail, due to SW-179867.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[0-True]" : "Xfail, due to SW-179868.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]" : "Xfail, due to SW-179868.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]" : "Xfail, due to SW-175716.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]" : "Xfail, due to SW-180488.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]" : "Xfail, due to SW-179873.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]" : "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]" : "Xfail, due to SW-179873.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]" : "Xfail due to SW-164593", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]" : "Xfail due to SW-164593", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]" : "Xfail, due to SW-175716.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]" : "Xfail, due to 
SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]" : "Xfail, due to SW-180868.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]" : "Xfail, due to SW-180868.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]" : "Xfail, due to SW-175716.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]" : "Xfail, due to SW-175716.", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]":"Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton]":"Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]":"Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]":"xfail due to 
SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]":"xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton]":"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton]":"Xfail due to SW-181935", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]":"Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']":"Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]":"Xfail due to SW-182753", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test":"Xfail due to SW-181939", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]":"Xfail due to SW-181939", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]":"Xfail due to SW-182759", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor":"Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible":"Xfail due to SW-182766", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]":"xfail due to model download", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]":"XFail due to SW-181940", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]":"Xfail due to SW-181935", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": "xfail due to SW-184834", + 
"unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": "xfail due to SW-184834", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]":" xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]":" xfail due to SW-185015", +} + +g3_lazy_xfail_tests = { + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype0]": + "Xfail due to SW-187590", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype0]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype2]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype1]": + "Xfail due to SW-187590", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-False]": + "Xfail due to SW-186949", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "Xfail due to SW-186949", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-False]": + "Xfail due to SW-186949", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-False]": + "Xfail due to SW-186949", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-False]": + "Xfail due to SW-186949", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-False]": + "Xfail due to SW-186949", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-False]": + "Xfail due to SW-186949", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-False]": + "Xfail due to SW-186949", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail due to SW-168442", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "xfail due to SW-185015", + 
"unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "Xfail, due to SW-168583", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "Xfail, due to SW-168583", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "skipping due to TransformerBuilder is not 
supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + 
"unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "skipping due to TransformerBuilder is not supported by HPU, SW-176906", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail, due to SW-182502", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "Xfail, due to SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "Xfail, due to SW-176903", + "unit/ops/transformer/inference/test_bias_geglu.py::test_bias_geglu[dtype1-512-1-1]": + "Xfail flaky", + "unit/ops/transformer/inference/test_bias_geglu.py::test_gated_silu[dtype0-512-1-1]": + "Xfail flaky", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "Xfail due to SW-181939", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "Xfail due to SW-181939", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConvergence::test[gpt2]": + "XFail for now", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "Xfail, due to SW-156783", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "Xfail, due to SW-143227.", + "unit/runtime/zero/test_zero_nesting_init.py::TestNestedParallelInit::test_nested_parallel_init": + "Xfail download issue", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "Xfail due to SW-182766", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "Xfail, due to SW-164551", 
+ "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "Xfail, due to SW-164551", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "Xfail, due to SW-164551", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail due to RuntimeError: Incompatible input shapes, broadcast not possible. Tensor1 Size: 5 5 16 1 Tensor2 Size: 5 1 8During handling of the above exception, another exception occurred", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + 
"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "Xfail due to SW-182759", +} +hpu_eager_xfail_tests = {} + +g1_eager_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "Xfail, due to 
SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/runtime/test_autocast.py::TestAutoCastDisable::test_missing_amp_autocast[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalescedTensorSmallerThanWorldSize::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_two_inputs": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestReduceScatterCoalesced::test_single_input": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-fp32-zero3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[fp16-bf16-zero3]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "float16/half is not supported on Gaudi.", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int8_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_model_quantization[4bits]": + "float16/half is not supported on 
Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_half_int4_quantization": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-1]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_cpu_offload[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_cpu_offload[8bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[4bits-0]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization[4bits]": + "float16/half is not supported on Gaudi.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_quantized_linear[8bits-0]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[dict]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[tuple]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3DictFwd::test[list]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3InitForParentWeightInitialization::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[False]": + 
"float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroPartitionCache::test_training_partition_cache[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestScatterGather::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestGatherUpdate::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_scatter_halftype": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_subclass_param": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestDSInitWZinit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_ancestry.py::TestSerialParamInit::test_subclass_param_init": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-local-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-False]": + 
"float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-3-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-local-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-3-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/half_precision/test_bf16.py::TestZeroDtypeCocktail::test[default-fp16]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestDataEfficiency::test_curriculum_learning": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_linear": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_data_efficiency.py::TestLegacyCurriculumScheduler::test_fixed_discrete": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDeprecatedDeepScaleConfig::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestDistInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_hjson": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_json": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestConfigLoad::test_dict": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_no_args": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestArgs::test_none_args": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_config_dict.py::TestInitNoOptimizer::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_ignore_unused_parameters.py::TestStage2IgnoreUnusedParameters::test[True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-0-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-0-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-0-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-0-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[False-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestMoE::test[True-2-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestPRMoE::test[2-False]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe.py::TestPRMoE::test[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe[4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-1-2]": + "float16/half is not supported on Gaudi.", + 
"unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-False-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[False-True-2-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-2]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-True-1-4]": + "float16/half is not supported on Gaudi.", + "unit/moe/test_moe_tp.py::TestMOETensorParallel::test[True-False-2-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuSGD-SGD]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdamW-AdamW]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[False-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_mup_optimizers.py::TestMuPOptimizers::test[True-MuAdam-Adam]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestNonPLDModel::test_non_pld_model": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0.9]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[1.0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_pld.py::TestPLDModel::test_pld_model[0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestPartitionNcclAlignment::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadOptim::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestEmptyParameterGroup::test_empty_param_groups[dtype1-False-True]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningBase::test_fp16_enabled[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroFrozenWeights::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1000]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestIncorectAllgatherBucketSize::test[1001]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[False-2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_1_param_group[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[False-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroToFP32::test_2_param_groups[True-3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningLargeParam::test[True-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3ParamPartitioningManyParams::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroOffloadStage1::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[2]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZeroUnbalancedGradients::test[3]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context.py::TestSerialContext::test_ext_param_getattr": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[tensor]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[None]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_stage_3_output_type[dict]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zero_context_return.py::TestReturnParam::test_ext_param_return": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-False]": + "float16/half is 
not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-True]": + "float16/half is not supported on Gaudi.", 
+ "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-2-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-1-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-2-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[none-1-full-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[none-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-1-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-2-full-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-False]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-False]": + "float16/half is not supported on Gaudi.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + 
"float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[True-2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestSaveTensorClone::test_save_tensor_clone[False-2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_pipeline.py::TestPipelineCheckpoint::test_checkpoint_pipe_engine[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_disabled": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_custom_backend": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_wrapper.py::TestCustomMethod::test_custom_function": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_frozen_weights[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "float16/half is not supported on 
Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_immediate_save_load[3-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[True-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-False-True]": + "float16/half is not supported on Gaudi.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-False-True-True]": + "float16/half is not supported on Gaudi.", 
+ "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_change_dp[False-True-True-True]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-j-6B]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[EleutherAI/gpt-neo-125M]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[bigscience/bloom-560m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-125m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_checkpoint_sharding.py::TestCheckpointShardinAutoTP::test[facebook/opt-350m]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-255-2]": + 
"float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-4096-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-1-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-512-255-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-512-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-128-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-1-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-4096-255-1]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[True-dtype0-1232-128-2]": + "float16/half is not supported on Gaudi.", + "unit/ops/transformer/inference/test_gelu.py::test_gelu[False-dtype0-1232-255-2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "float16/half is not supported on Gaudi.", + 
"unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-facebook/opt-350m-zero_stage=3-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[none-EleutherAI/gpt-neo-125m-zero_stage=2-bsz=1]": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestMPSize::test[bf16-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail, due to SW-175376.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[1-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_gradient_accumulation[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[1-9-1024]": + "float16/half is not supported on Gaudi.", + 
"unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[2-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test_eval[4-9-1024]": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "float16/half is not supported on Gaudi.", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "float16/half is not supported on Gaudi.", + "unit/runtime/test_ds_initialize.py::TestOptimizerImplementation::test[bf16-bf16-None]": + "float16/half is not supported on Gaudi.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu:0]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestParamPartitioningSkipInit::test": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[False]": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_counter": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_choose_module_by_rank": + "float16/half is not supported on Gaudi.", + "unit/runtime/zero/test_zero_leaf_module.py::TestSetZ3LeafModule::test_no_grad_input_error": + "float16/half is not supported on Gaudi.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype0]": + "Xfail due to SW-181951", + "unit/inference/test_inference.py::TestInjectionPolicy::test[fp32-t5]": + "Xfail, due to SW-182668", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian]": + "Xfail, due to SW-182669", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian]": + "Xfail, due to SW-182670", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + 
"unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182671", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config2]": + "Xfail due to SW-182509", + "unit/runtime/pipe/test_pipe.py::TestPipeCifar10::test_pipe_use_reentrant[topo_config1]": + "Xfail due to SW-182509", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_bf16_fragments[False]": + "Xfail, due to SW-182511", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to OP not implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to OP not implemented on HPU", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "xfail due to model download", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + 
"unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[bigscience/bloom-560m-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + 
"unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[openai-community/gpt2-text-generation-fp16-noCG-noTriton]": + "Xfail 
due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton]": + "Xfail due to FP16 not supported to gaudi1", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail due to FP16 not supported on gaudi", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail due to FP16 not supported on gaudi", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws2-fp32-t5]": + "Xfail, due to SW-182668.", + "unit/inference/test_inference.py::TestInjectionPolicy::test[ws1-fp32-t5]": + "Xfail, due to SW-182668.", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-False-1-dtype1]": + "Xfail due to fp16 
not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-True-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-False-1-dtype1]": + "Xfail due to fp16 not supported", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]": 
+ "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]": + "Fp16 not supported", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "FP16 not supported", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True]": + "Xfail due to SW-189262", +} + +g2_eager_xfail_tests = { + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + 
"unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Norod78/hebrew-bad_wiki-gpt_neo-tiny-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "Xfail, due to 
SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton]": + "Xfail, due to SW-163097.", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail, due to SW-168442.", + "unit/utils/test_init_on_device.py::TestOnDevice::test_on_device[hpu]": + "Xfail, due to SW-178730.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfail, due to SW-164545.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfail, due to SW-164545.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "Xfail, due to SW-176903.", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "Xfail, due to SW-176903.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Xfail, due to SW-179864.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_save_exclude_custom_frozen_weights[2-True]": + "Xfail, due to SW-179873.", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail, due to SW-179864.", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": 
+ "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "Xfail, due to SW-176905.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "Xfail, due to SW-176905.", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail, due to SW-175376.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097.", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-4-1024]": + "Xfail, due to SW-164239.", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-4-1024]": + "Xfail, due to SW-164239.", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + 
"Xfail due to SW-182748", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "Xfail due to SW-181951", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "Xfail due to SW-181951", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "Xfail due to SW-181951", + "unit/runtime/half_precision/test_fp16.py::TestAdamFP16ZeroOneCycleCompatibility::test[True-3]": + "Xfail due to SW-181951", + "unit/runtime/half_precision/test_fp16.py::TestZeroStaticScale::test[True-3]": + "Xfail due to SW-181951", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "Xfail due to SW-181951", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "Xfail due to SW-181951", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "Xfail due to SW-181951", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype0]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "Xfail due to SW-181951", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "Xfail due to SW-181951", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to 
SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]": + 
"Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-bf16-CG-noTriton]": + "Xfail due to SW-163097", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "Xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "Xfail due to SW-182766", + "unit/inference/test_inference.py::TestModelTask::test[openai-community/gpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-181935", + "unit/inference/test_inference.py::TestModelTask::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-181935", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-codegen]": + "Xfail, due to SW-178702", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail, due to SW-182502", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail, due to SW-182502", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_float_int4_quantization": + "Xfail, due to SW-182501", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_float_int8_quantization": + "Xfail, due to SW-182501", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]": + "Xfail due to SW-181951", + 
"unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "Xfail due to SW-181951", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail due to SW-181951", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to op not been implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Xfail due to op not been implemented on HPU", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "Xfail due to op not been implemented on HPU", + "unit/inference/test_inference.py::TestLMCorrectness::test[lambada_standard-gpt2-openai-community/gpt2-xl]": + "xfail due to model download", + "unit/runtime/zero/test_zero.py::TestZeroAdamOptimizerStepCount::test[3]": + "Xfail due to SW-181940", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "Xfail due to SW-181939", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/roberta-base-squad2-question-answering-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[deepset/minilm-uncased-squad2-question-answering-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[j-hartmann/emotion-english-distilroberta-base-text-classification-bf16-noCG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[dslim/bert-base-NER-token-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[Jean-Baptiste/roberta-large-ner-english-token-classification-bf16-CG-noTriton]": + "xfail due to SW-184834", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + " xfail 
due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + " xfail due to SW-185015", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "Xfail due to SW-185136", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "Xfail due to SW-185136", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "Xfail due to SW-185136", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "Xfail due to SW-185136", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail due to SW-182502", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to 
SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton-True-True]": + "Xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True-False]": + "Xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-125m-text-generation-fp32-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton-False-True]": + "Xfail due to SW-189257", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-False]": + "Xfail due to SW-188513", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom-True]": + "Xfail due to 189259", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[bf16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[fp16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test[bf16-marian-True]": + "Xfail due to SW-189262", + "unit/inference/test_inference.py::TestAutoTensorParallelism::test_odd_world_size[fp16-marian-True]": + "Xfail due to SW-189262", +} + +g3_eager_xfail_tests = { + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail due to SW-178702 RuntimeError: Expected all tensors to be on the HPU device, but found at least one input[idx=1] on cpu ", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Xfail due to SW-178702 RuntimeError: Expected all tensors to be on the HPU device, but found at least one input[idx=1] on cpu ", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype0]": + "Xfail due to SW-178702 RuntimeError: Expected all tensors to be on the HPU device, but found at least one input[idx=1] on cpu ", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype2]": + "Xfail due to SW-178702 RuntimeError: Expected all tensors to be on the HPU device, but found at least one input[idx=1] on cpu ", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-3-dtype1]": + "Xfail due to SW-178702 RuntimeError: Expected all tensors to be on the HPU device, but found at least one input[idx=1] on cpu ", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Xfail due to SW-178702 RuntimeError: Expected all tensors to be on the HPU device, but found at least one input[idx=1] on cpu ", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Xfail due to SW-178702 RuntimeError: Expected all tensors to be on the HPU device, but found at least one input[idx=1] on cpu ", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype1]": + "This op had not been implemented on HPU backend", + 
"unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-1-dtype0]": + "This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "This op had not been implemented on HPU backend", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype0]": + "This op had not been implemented on HPU backend", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-160-128-2-24-False-True-0.2]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-25-3-True-True-0.05]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-160-128-2-3-True-True-0.1]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[8-1600-128-2-3-True-True-0.05]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_backward.py::TestCUDABackward::test_backward[64-1600-128-2-4-False-True-0.2]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-3-1024-512-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForwardSmallBatchSize::test_forward_with_small_bsz[8-7-1024-512-16-3-False-True]": + "xfail due to SW-176905", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_1d_tensor": + "xfail due to SW-182766", + "unit/runtime/comm/test_coalesced_collectives.py::TestAllToAllQuantReduceFallback::test_non_divisible": + "xfail due to SW-182766", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "xfail due to SW-168596", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "xfail due to SW-168596", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-False-FusedLion]": + "xfail due to SW-176903", + "unit/ops/lion/test_lion.py::TestLionConfigs::test[Lion-True-DeepSpeedCPULion]": + "xfail due to SW-176903", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[22-fp16]": + "Xfail due to SW-188274", + 
"unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1024-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[1048576-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[64-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_cpu_adam.py::TestCPUAdam::test_fused_adam_equal[128-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[16-fp16]": + "Xfail due to SW-188274", + "unit/ops/adam/test_hybrid_adam.py::TestHybridAdam::test_hybrid_adam_equal[8-fp16]": + "Xfail due to SW-188274", + "unit/runtime/half_precision/test_fp16.py::TestAdamFP16ZeroOneCycleCompatibility::test[True-3]": + "Xfail due to SW-178702", + "unit/runtime/half_precision/test_fp16.py::TestZeroStaticScale::test[True-3]": + "Xfail due to SW-178702", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-False]": + "Xfail due to SW-178702", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-False]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[4-1024]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_offloadpp.py::TestZeroPartialOffloadConfigSweep::test[8-1024]": + "Xfail due to SW-178702", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-False]": + "Xfail due to SW-178702", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-False]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-False]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-full-True]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-True]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentGet::test_zero_fragments[cpu-3-local-False]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype1]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype0]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype2]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-local-dtype2]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype0]": + "Xfail due to SW-178702", + "unit/runtime/zero/test_zero_tensor_fragment.py::TestTensorFragmentUpdate::test_zero_fragments[cpu-3-full-dtype1]": + "Xfail due to SW-178702", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype2]": + "Xfail due to SW-187821", + 
"unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-True-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[False-False-1-dtype1]": + "Xfail due to 
SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[True-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-True-1-dtype0]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[True-False-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to4[False-False-1-dtype2]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_4to2[True-True-1-dtype1]": + "Xfail due to SW-187821", + "unit/checkpoint/test_universal_checkpoint.py::TestZeROUniversalCheckpointDP::test_dp_world_size_2to2[False-False-1-dtype0]": + "Xfail due to SW-187821", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_float_int8_quantization": + "Xfail due to SW_182501", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_float_int4_quantization": + "Xfail due to SW_182501", + "unit/runtime/zero/test_zero.py::TestZero3RepeatForwardLoop::test[True]": + "Xfail due to SW-191939", + "unit/compression/test_dequantization.py::TestDequantization::test_dequantize": + "Xfail due to SW-168442", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "Xfail due to SW-182759", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[facebook/opt-350m-text-generation-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-noCG-noTriton]": + 
"xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp32-CG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[deepset/roberta-base-squad2-question-answering-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[Jean-Baptiste/roberta-large-ner-english-token-classification-fp32-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_inference.py::TestModelTask::test[j-hartmann/emotion-english-distilroberta-base-text-classification-fp16-noCG-noTriton]": + "xfail due to SW-163097", + "unit/inference/test_stable_diffusion.py::TestStableDiffusion::test": + "Xfail, due to SW-170181.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-160-128-2-24-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-509-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-4096-128-64-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True1]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-21-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-51-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-53-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-381-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-120-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-512-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-25-3-False-True]": + "xfail due to SW-176905", + 
"unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-True-True0]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[1-256-2048-32-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-119-16-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-384-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1536-128-24-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[3-1024-54-16-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1024-511-16-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-160-128-2-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-24-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[64-1024-56-16-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-256-52-4-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-128-128-2-3-True-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2560-128-40-3-False-False]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-2048-128-32-3-False-True]": + "xfail due to SW-176905", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-1600-128-2-3-True-True]": + "xfail due to SW-176905", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail, due to SW-163097", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail, due to SW-163097", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[1-9-1024]": + "Xfail, due to SW-164239", + "unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[4-9-1024]": + "Xfail, due to SW-164239", + 
"unit/runtime/zero/test_zeropp.py::TestZeroPPConfigSweep::test[2-9-1024]": + "Xfail, due to SW-164239", + "unit/inference/test_inference.py::TestMPSize::test[fp16-bloom]": + "Xfail, due to SW-175376.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "xfail due to SW-185015", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "xfail due to SW-185015", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp32-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-fp16-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/inference/test_inference.py::TestModelTaskKIFalse::test[distilbert/distilgpt2-text-generation-bf16-noCG-noTriton]": + "Xfail due to SW-182748", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-EleutherAI/gpt-neo-125m-zero_stage=3-bsz=1]": + "Xfail due to SW-181951", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-facebook/opt-350m-zero_stage=3-bsz=1]": + "Xfail due to SW-181951", + "unit/hybrid_engine/test_he_lora.py::TestHybridEngineLoRA::test_lora[cpu-bigscience/bloom-560m-zero_stage=3-bsz=1]": + "Xfail due to SW-181951", +} +gpu_xfail_tests = { + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=2]": + "Test requires higher memory.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_post_init_quant_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_int4_quantization.py::TestQuantizedInt4::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[16-tensor_shape0]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[1-tensor_shape0]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[16-tensor_shape1]": + "Xfailed. failure observed on vanilla as well.", + "unit/ops/quantizer/test_fake_quantization.py::test_fake_quant_dequant[1-tensor_shape1]": + "Xfailed. failure observed on vanilla as well.", + "unit/hybrid_engine/test_he_llama.py::TestHybridEngineLlama::test_functionality[huggyllama/llama-7b-bsz=1]": + "Test requires higher memory.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_atom_builder.py::test_single_sequence[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths1]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[2037]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_rotary_emb[False]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_rotary_emb[True]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[65]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[256]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_head_size[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[33]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_single_prompt[2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_head_size[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_fully_composed": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_gqa[head_config1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_multiple_prompts[prompt_lengths0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_blocked_attn.py::test_continuation[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[33-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[17-0]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_multiple_blocks[128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_multi_sequence": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_kv_copy.py::test_single_sequence_single_block[63-1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_multi_sequences[True]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-169-8]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-1-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_multi_sequences[False]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-33-15]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-33-15]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-17-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[False-1-63]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-128-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[False-117-88]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_single_block[True-1-63]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_rotary_emb.py::test_single_sequence_multiple_blocks[True-177-0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_supported_dtypes[dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[1024]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[6144]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_supported_dtypes[dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_problem_size_permutations[6784]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_logits_gather.py::test_multiple_sequences[seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[False-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_gather.py::test_moe_gather[True-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-13-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[True-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-278-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_moe_scatter.py::test_moe_scatter[False-1977-64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens0]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[50304-6144]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype1-token_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[True-seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding_offset": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[32000-5120]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[True-seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_problem_size_permutations[1024-1024]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype0-token_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[False-seq_lens0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype0-token_dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_positional_embedding[seq_lens2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_dtype_permutations[embed_dtype1-token_dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_ragged_embed.py::test_complex_sequences[False-seq_lens1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[433-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[32-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_negative_logits": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[89-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[32-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[89-128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[17-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_single_mapping_gating[1-16]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[433-2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[17-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_determinism": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_top_1_gating.py::test_score_accuracy[1-16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape0-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape4-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape7-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape5-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape1-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape3-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape2-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape4-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape3-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape6-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape5-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape7-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape6-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape1-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear_t[problem_shape2-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/core_ops/test_blas_linear.py::test_blas_linear[problem_shape0-fp_dtype1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[256]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config0]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[65]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_head_size[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_fully_composed": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_head_size[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_continuation[seq_params3]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[33]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_single_prompt[2037]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_multiple_prompts[prompt_lengths2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/ragged_ops/test_blocked_flash.py::test_gqa[head_config1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[2048-8192]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[32]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.RELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_dtypes[dtype0]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.GELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_activation_types[ActivationType.SILU]": + "Xfailed. 
failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_successive_inputs": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[4096-2048]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_in_out_channels[6144-3072]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/modules/test_cutlass_moe.py::test_expert_variance[2]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_dtypes[DtypeEnum.bf16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.GELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_dtypes[DtypeEnum.fp16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[13-2048-2048]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.SILU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[64]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[256-1024-4096]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[4]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[893-5120-2560]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[16]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[128]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_act_fns[ActivationType.RELU]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_single_expert[278-5120-2048]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/v2/kernels/cutlass_ops/test_moe_gemm.py::test_multi_expert[1]": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_post_init_quant_nvme_offload": + "Xfailed. failure observed on vanilla as well.", + "unit/inference/quantization/test_intX_quantization.py::TestQuantizedInt::test_zero3_int4_quantized_initialization_nvme_offload": + "Xfailed. 
failure observed on vanilla as well.", + "unit/ops/accelerators/test_accelerator_forward.py::TestCUDAForward::test_forward[8-8192-128-64-3-False-True]": + "Test requires higher memory.", + "unit/ops/adam/test_adamw/TestAdamConfigs/test[AdamW-True-False-True-resulting_optimizer6]": + "Xfail, due to SW-176845", + "unit/ops/adam/test_adamw/TestAdamConfigs/test[AdamW-True-False-False-resulting_optimizer2]": + "Xfail, due to SW-176845", + "unit/ops/adam/test_adamw/TestAdamConfigs/test[Adam-True-False-True-resulting_optimizer14]": + "Xfail, due to SW-176845", + "unit/ops/adam/test_adamw/TestAdamConfigs/test[Adam-True-False-False-resulting_optimizer10]": + "Xfail, due to SW-176845", + "unit/inference/test_inference.py::TestMPSize::test[fp32-gpt-neo]": + "Xfail due to SW-177890 and SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail due to SW-177889", + "unit/inference/test_inference.py::TestMPSize::test[fp16-gpt-neo]": + "Xfail due to SW-177889", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail due to SW-177889", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-2-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[nvme-3-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-1-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype1]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[cpu-2-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + 
"unit/runtime/compile/test_compile_zero.py::TestZeRO::test_compile_zero[none-1-dtype2]": + "Xfail due to pytorch>2.0 is required and Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile": + "Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compile_kwargs": + "Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_set_compiler_fn": + "Nvidia Titan XP GPU not supported", + "unit/runtime/compile/test_load_config.py::TestConfigLoad::test_compile_kwargs": + "Nvidia Titan XP GPU not supported", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=2]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=1]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[facebook/opt-1.3b-bsz=1]": + "Xfail due to SW-177889", + "unit/hybrid_engine/test_he_all.py::TestHybridEngineTextGen::test_functionality[EleutherAI/gpt-neo-1.3B-bsz=2]": + "Xfail due to SW-177889", + "unit/inference/test_inference.py::TestLowCpuMemUsage::test[gpt2]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-True]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-True]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[True-False]": + "Xfail due to SW-177889", + "unit/inference/test_model_profiling.py::TestModelProfiling::test[False-False]": + "Xfail due to SW-177889", + "unit/inference/v2/ragged/test_manager_configs.py::test_too_small_max_ragged_batch_size": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_tracked_sequences": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_ragged_batch_size": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_ragged_batch_size": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_too_small_max_tracked_sequences": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_tracked_sequences": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_zero_max_ragged_sequence_count": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/inference/v2/ragged/test_manager_configs.py::test_negative_max_ragged_sequence_count": + "Xfail due to ValidationError if the input data cannot be parsed to form a valid model", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[0]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestNoOptim::test[3]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Callable]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[Optimizer]": + 
"Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestClientOptimizer::test[None]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[False]": + "Xfail due to OOM", + "unit/runtime/test_ds_initialize.py::TestConfigOptimizer::test[True]": + "Xfail due to OOM", + "unit/checkpoint/test_latest_checkpoint.py::TestLatestCheckpoint::test_existing_latest[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[1-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[1-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[0-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[2-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_lr_scheduler[3-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[0-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[3-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_lr_scheduler.py::TestLRSchedulerCheckpoint::test_checkpoint_no_lr_scheduler[2-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[2-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_moe_checkpoint.py::TestMoECheckpoint::test_checkpoint_moe_and_zero[4-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_unfused_optimizer[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fused_optimizer[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_other_optimizer.py::TestOtherOptimizerCheckpoint::test_checkpoint_fp32_optimizer[True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[0-True]": + 
"Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[0-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_save_before_accum_grad_is_done[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROSaveLoadEdgeCase::test_load_immediate_save[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-True-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[False-False-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-True-False-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROElasticCheckpoint::test_elastic_checkpoint_fixed_dp[True-False-True-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[1-True]": + "Compile tests not supported on Titan-XP", + 
"unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_module_only[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_load_optimizer_state[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpointFrozenWeights::test_not_load_optimizer_state[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[3-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-True-deepspeed_adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[1-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_optimizer_state[2-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[1-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[3-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[3-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[1-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_hybrid_optimizer_state[2-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_not_load_optimizer_state[2-False-Adam-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_load_module_only[0-True]": + "Compile tests not supported on Titan-XP", + "unit/checkpoint/test_zero_optimizer.py::TestZeROCheckpoint::test_pipeline_checkpoint_loading[3-True]": + "Compile tests not supported on Titan-XP", + "unit/inference/test_human_eval.py::test_human_eval[codellama/CodeLlama-7b-Python-hf]": + "Xfail due to SW-182759", + "unit/accelerator/test_accelerator.py::test_abstract_methods_defined[deepspeed.accelerator.xpu_accelerator]": + "Xfail due to SW-182749", + "unit/launcher/test_user_args.py::test_user_args[True-I'm going to tell them \"DeepSpeed is the best\"]": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'\"translate English to 
Romanian: \"']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-'I am 72\" tall']": + "Xfail due to SW-182753", + "unit/launcher/test_user_args.py::test_user_args[True-\"I am 6' tall\"]": + "Xfail due to SW-182753", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-None]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-None]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-_LRScheduler]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-_LRScheduler]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-Callable]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-None]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Optimizer-Callable]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[None-Callable]": + "Cuda OOM", + "unit/runtime/test_ds_initialize.py::TestClientLrScheduler::test[Callable-_LRScheduler]": + "Cuda OOM", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp32-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[deepset/minilm-uncased-squad2-question-answering-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[dslim/bert-base-NER-token-classification-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-whole-word-masking-finetuned-squad-question-answering-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[cross-encoder/ms-marco-MiniLM-L-12-v2-text-classification-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + 
"unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-CG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-CG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-base-fill-mask-fp16-noCG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-uncased-fill-mask-fp16-noCG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-cased-fill-mask-fp16-noCG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", 
+ "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-noCG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-noCG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-large-cased-fill-mask-fp16-CG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-uncased-fill-mask-fp16-noCG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[FacebookAI/roberta-large-fill-mask-fp16-noCG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-cased-fill-mask-fp16-CG-noTriton-True]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-False]": + "Same failure in Vanilla.", + "unit/inference/test_inference.py::TestModelTask::test[google-bert/bert-base-multilingual-uncased-fill-mask-fp16-noCG-noTriton-True]": + "Same failure in Vanilla.", +}