
Commit

Merge branch 'master' into hablb/ipg_grads
loadams authored Mar 13, 2024
2 parents dcf6282 + a6fb4d3 commit 8f14a34
Showing 7 changed files with 29 additions and 3 deletions.
11 changes: 9 additions & 2 deletions tests/unit/inference/test_inference.py
@@ -653,8 +653,15 @@ def no_pool_bootstrap_stderr(f, xs, iters):
             setattr(lm, model_family, getattr(lm, model_family).half().to(device))
             lm._device = device
         else:
-            lm = lm_eval.models.get_model(model_family).create_from_arg_string(
-                f"pretrained={model_name}", {"device": get_accelerator().device_name()})
+            if get_accelerator().device_name() == 'hpu':
+                # lm_eval does not support the HPU device, so build the model on CPU and move it to HPU.
+                lm = lm_eval.models.get_model(model_family).create_from_arg_string(f"pretrained={model_name}",
+                                                                                   {"device": "cpu"})
+                setattr(lm, model_family, getattr(lm, model_family).to(device))
+                lm._device = device
+            else:
+                lm = lm_eval.models.get_model(model_family).create_from_arg_string(
+                    f"pretrained={model_name}", {"device": get_accelerator().device_name()})
 
         get_accelerator().synchronize()
         start = time.time()
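The HPU branch above is an instance of a general workaround: when a library's model factory cannot target an accelerator directly, build the model on CPU and move it afterwards. A minimal sketch of the pattern, assuming a hypothetical build_model factory that is not part of this commit:

    import torch


    def create_on_cpu_then_move(build_model, device: str) -> torch.nn.Module:
        # The factory only has to support CPU; parameters and buffers are
        # transferred to the target accelerator in a single .to() call.
        model = build_model(device="cpu")
        return model.to(torch.device(device))


    # Usage sketch, assuming an 'hpu' device is present:
    # lm = create_on_cpu_then_move(my_lm_factory, "hpu")
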
2 changes: 2 additions & 0 deletions tests/unit/runtime/compile/test_compile_wrapper.py
@@ -34,6 +34,8 @@ def base_config():
             "backend": "inductor"
         }
     }
+    if get_accelerator().device_name() == 'hpu':
+        config_dict['compile']['backend'] = 'hpu_backend'
     return config_dict
 
 
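The same two-line fixture change recurs in the next two compile tests: the torch.compile backend in the DeepSpeed config is switched from the default inductor to Gaudi's hpu_backend when running on HPU. A hedged sketch of the resulting fixture shape, with values outside the compile block being illustrative:

    from deepspeed.accelerator import get_accelerator


    def base_config():
        config_dict = {
            "train_batch_size": 8,  # illustrative, not taken from this diff
            "compile": {
                "enabled": True,
                "backend": "inductor",  # default torch.compile backend
            },
        }
        # Gaudi devices compile through their own backend.
        if get_accelerator().device_name() == 'hpu':
            config_dict['compile']['backend'] = 'hpu_backend'
        return config_dict
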
2 changes: 2 additions & 0 deletions tests/unit/runtime/compile/test_compile_zero.py
@@ -55,6 +55,8 @@ def test_compile_zero(self, tmpdir, zero_stage, dtype, offload_device):
             }
         }
 
+        if get_accelerator().device_name() == 'hpu':
+            config_dict['compile']['backend'] = 'hpu_backend'
         if offload_device == OffloadDeviceEnum.cpu:
             config_dict["zero_optimization"]["offload_optimizer"] = {"device": offload_device}
         elif offload_device == OffloadDeviceEnum.nvme:
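For reference, the offload branches above write the standard ZeRO offload section of the DeepSpeed config. A sketch of the two shapes they produce, with stage and path values illustrative rather than taken from this test:

    # CPU offload keeps optimizer state in host memory.
    cpu_offload = {"zero_optimization": {"stage": 3, "offload_optimizer": {"device": "cpu"}}}

    # NVMe offload additionally needs a filesystem path backed by NVMe.
    nvme_offload = {
        "zero_optimization": {
            "stage": 3,
            "offload_optimizer": {
                "device": "nvme",
                "nvme_path": "/local_nvme",  # illustrative path
            },
        }
    }
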
3 changes: 3 additions & 0 deletions tests/unit/runtime/compile/test_load_config.py
@@ -50,6 +50,9 @@ def base_config():
             "backend": "inductor"
         }
     }
+
+    if get_accelerator().device_name() == 'hpu':
+        config_dict['compile']['backend'] = 'hpu_backend'
     return config_dict
 
 
3 changes: 3 additions & 0 deletions tests/unit/runtime/half_precision/onebit/test_onebit.py
@@ -33,6 +33,9 @@
     pytest.skip("NCCL-based 1-bit compression is not yet supported w. ROCm 5 until cupy supports ROCm 5",
                 allow_module_level=True)
 
+if get_accelerator().device_name() == 'hpu':
+    pytest.skip("1-bit compression is not supported by HPU.", allow_module_level=True)
+
 
 @pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"])
 class TestOneBitAdamBasic(DistributedTest):
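This file and the two sparse-gradient files below all gate at module scope: pytest.skip(..., allow_module_level=True) runs at import time and skips every test in the file at once, rather than marking each test individually. A generic sketch of the pattern, with an illustrative message:

    import pytest

    from deepspeed.accelerator import get_accelerator

    # Evaluated when pytest imports the module, before any test is collected.
    if get_accelerator().device_name() == 'hpu':
        pytest.skip("feature not supported on this accelerator.", allow_module_level=True)
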
@@ -4,9 +4,14 @@
 # DeepSpeed Team
 
 import torch
+import pytest
 import deepspeed
 from unit.common import DistributedTest
 from unit.util import skip_on_arch
+from deepspeed.accelerator import get_accelerator
 
+if get_accelerator().device_name() == 'hpu':
+    pytest.skip("sparse_gradients not supported by HPU.", allow_module_level=True)
+
 
 class Model(torch.nn.Module):
6 changes: 5 additions & 1 deletion tests/unit/runtime/sparse_tensor/test_sparse_grads.py
@@ -4,11 +4,15 @@
 # DeepSpeed Team
 
 import torch
+import pytest
 import deepspeed
 from unit.common import DistributedTest
-from unit.util import skip_on_arch
+
 from deepspeed.accelerator import get_accelerator
 import deepspeed.utils.groups as groups
 
+if get_accelerator().device_name() == 'hpu':
+    pytest.skip("sparse_gradients not supported by HPU.", allow_module_level=True)
+
 
 class Model(torch.nn.Module):
 
