#45 add timeout to train job #46

Open · wants to merge 4 commits into base: main
3 changes: 3 additions & 0 deletions caikit_ray_backend/blocks/ray_train.py
@@ -218,6 +218,9 @@ def train(
             error.value_check("<RYT87231812E>", num_gpus > 0)
             env_vars["requested_gpus"] = num_gpus
 
+        training_timeout = self.config.get("training_timeout", 60)
+        env_vars["training_timeout"] = float(training_timeout)
+
         # Serialize **kwargs and add them to environment variables
         my_kwargs = {}
         for key, value in kwargs.items():
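Note (not part of the diff): the hunk above reads the timeout from the block's config with a 60-second default and forwards it alongside the other env vars. A standalone sketch of that lookup-with-default pattern, using an illustrative config dict rather than the module's real plumbing:

```python
# Illustrative only: mirrors the config lookup in the hunk above.
config = {"connection": {"address": "ray://localhost:10001"}, "training_timeout": 30}

env_vars = {}
training_timeout = config.get("training_timeout", 60)   # fall back to 60s when unset
env_vars["training_timeout"] = float(training_timeout)  # coerce so 30 or "30" both work

assert env_vars["training_timeout"] == 30.0
```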
19 changes: 13 additions & 6 deletions caikit_ray_backend/ray_submitter.py
@@ -76,13 +76,20 @@ def main():
     if model_path:
         error.type_check("<RYT70238308E>", str, model_path=model_path)
 
-    # Finally kick off trainig
+    timeout = runtime_env.get("training_timeout", float(60))
+
+    # Finally kick off training
     with alog.ContextTimer(log.debug, "Done training %s in: ", module_class):
-        ray.get(
-            ray_training_tasks.train_and_save.options(
-                num_cpus=num_cpus, num_gpus=num_gpus
-            ).remote(module_class, model_path, *args, **kwargs)
-        )
+        task = ray_training_tasks.train_and_save.options(
+            num_cpus=num_cpus, num_gpus=num_gpus
+        ).remote(module_class, model_path, *args, **kwargs)
+        ready, _ = ray.wait([task], timeout=timeout)
+        if ready:
+            ray.get(task)
+        else:
+            ray.cancel(task)
+            log.error("Task did not complete before time out.")
+            raise TimeoutError("Task did not complete before time out.")
 
 
 if __name__ == "__main__":
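Note (not part of the diff): the core of this change is Ray's wait-then-cancel pattern. A self-contained sketch, assuming a local `pip install ray`; `slow_task` is a hypothetical stand-in for `ray_training_tasks.train_and_save` and the 1-second timeout is arbitrary:

```python
# Sketch of the wait-then-cancel timeout pattern used in ray_submitter.py.
import time

import ray

ray.init()


@ray.remote
def slow_task():
    time.sleep(60)  # deliberately longer than the timeout below
    return "done"


ref = slow_task.remote()
ready, _ = ray.wait([ref], timeout=1.0)  # block for at most 1 second
if ready:
    print(ray.get(ref))
else:
    ray.cancel(ref)  # best-effort cancellation of the still-running task
    raise TimeoutError("Task did not complete before time out.")
```

`ray.wait` returns the refs that finished within the timeout, so an empty `ready` list is the timeout signal; cancelling before raising keeps the worker from running the abandoned task to completion.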
42 changes: 38 additions & 4 deletions tests/test_ray_backend.py
@@ -16,6 +16,7 @@
 """
 # Standard
 from datetime import datetime
+import logging
 import os
 import time
 
@@ -46,7 +47,10 @@ def jsonl_file_data_stream():
 
 
 def test_job_submission_client(mock_ray_cluster, jsonl_file_data_stream):
-    config = {"connection": {"address": mock_ray_cluster.address}}
+    config = {
+        "connection": {"address": mock_ray_cluster.address},
+        "training_timeout": 30.0,
+    }
     trainer = RayJobTrainModule(config, "ray_backend")
 
     args = [jsonl_file_data_stream]
@@ -82,7 +86,10 @@ def test_job_submission_client(mock_ray_cluster, jsonl_file_data_stream):
 
 
 def test_wait(mock_ray_cluster, jsonl_file_data_stream):
-    config = {"connection": {"address": mock_ray_cluster.address}}
+    config = {
+        "connection": {"address": mock_ray_cluster.address},
+        "training_timeout": 30.0,
+    }
     trainer = RayJobTrainModule(config, "ray_backend")
 
     args = [jsonl_file_data_stream]
@@ -101,7 +108,10 @@ def test_wait(mock_ray_cluster, jsonl_file_data_stream):
 
 
 def test_load(mock_ray_cluster, jsonl_file_data_stream):
-    config = {"connection": {"address": mock_ray_cluster.address}}
+    config = {
+        "connection": {"address": mock_ray_cluster.address},
+        "training_timeout": 30.0,
+    }
     trainer = RayJobTrainModule(config, "ray_backend")
 
     args = [jsonl_file_data_stream]
@@ -118,7 +128,10 @@ def test_load(mock_ray_cluster, jsonl_file_data_stream):
 
 
 def test_cancel(mock_ray_cluster, jsonl_file_data_stream):
-    config = {"connection": {"address": mock_ray_cluster.address}}
+    config = {
+        "connection": {"address": mock_ray_cluster.address},
+        "training_timeout": 30.0,
+    }
     trainer = RayJobTrainModule(config, "ray_backend")
 
     args = [jsonl_file_data_stream]
@@ -142,6 +155,27 @@ def test_cancel(mock_ray_cluster, jsonl_file_data_stream):
     assert status == TrainingStatus.CANCELED
 
 
+def test_timeout(mock_ray_cluster, jsonl_file_data_stream):
+    config = {
+        "connection": {"address": mock_ray_cluster.address},
+        "training_timeout": 0.25,
+    }
+    trainer = RayJobTrainModule(config, "ray_backend")
+
+    args = [jsonl_file_data_stream]
+    model_future = trainer.train(
+        SampleModule,
+        *args,
+        save_path="/tmp",
+    )
+
+    time.sleep(3)
+
+    status = model_future.get_info().status
+    print("Final status was", status)
+    assert status == TrainingStatus.ERRORED
+
+
 ## Test Ray Backend
 
 
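Note (not part of the diff): `test_timeout` relies on a fixed `time.sleep(3)` being long enough for the 0.25-second timeout to surface as `ERRORED`. A hedged alternative is to poll the future up to a deadline; the helper below is illustrative and assumes only the `get_info().status` accessor the tests already use:

```python
# Illustrative polling helper; not part of the PR.
import time


def wait_for_status(model_future, terminal_statuses, deadline_s=10.0, poll_s=0.1):
    """Poll until the future reports one of terminal_statuses or the deadline passes."""
    end = time.monotonic() + deadline_s
    while time.monotonic() < end:
        status = model_future.get_info().status
        if status in terminal_statuses:
            return status
        time.sleep(poll_s)
    return model_future.get_info().status


# Possible use in test_timeout:
#   status = wait_for_status(model_future, {TrainingStatus.ERRORED, TrainingStatus.COMPLETED})
#   assert status == TrainingStatus.ERRORED
```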
5 changes: 4 additions & 1 deletion tox.ini
@@ -11,6 +11,7 @@ deps =
     pytest>=6.2.5,<7.0
     pytest-cov>=2.10.1,<3.0
     pytest-html>=3.1.1,<4.0
+    pytest-catchlog
     tls_test_tools>=0.1.1
     wheel>=0.38.4
     tests/fixtures
@@ -20,6 +21,7 @@ passenv =
     LOG_FORMATTER
     LOG_THREAD_ID
     LOG_CHANNEL_WIDTH
+env_name=dev
 commands = pytest --cov=caikit --cov-report=html:coverage-{env_name} --cov-report=xml:coverage-{env_name}.xml {posargs:tests}
 
 ; Unclear: We probably want to test wheel packaging
@@ -32,7 +34,8 @@ description = format with pre-commit
 deps = pre-commit>=3.0.4,<4.0
 commands = ./scripts/fmt.sh
 allowlist_externals = ./scripts/fmt.sh
-skip_install = True # Skip package install since fmt doesn't need to execute code, for ⚡⚡⚡
+skip_install = True
+# Skip package install since fmt doesn't need to execute code, for ⚡⚡⚡
 basepython = py39
 
 [testenv:lint]
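Note (not part of the diff): `pytest-catchlog` was merged into pytest core long ago as the built-in `caplog` fixture (pytest ≥ 3.4) and the standalone plugin is archived, so with the `pytest>=6.2.5` pin above the extra dependency may be unnecessary or even conflict. A minimal sketch of log capture with the built-in fixture; the test name and logger name are illustrative:

```python
import logging


def test_logs_timeout_error(caplog):
    # caplog is pytest's built-in log-capture fixture.
    with caplog.at_level(logging.ERROR):
        logging.getLogger("ray_submitter").error("Task did not complete before time out.")
    assert "did not complete" in caplog.text
```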