diff --git a/src/sagemaker/local/image.py b/src/sagemaker/local/image.py
index 38d963bfda..1fbc93fca0 100644
--- a/src/sagemaker/local/image.py
+++ b/src/sagemaker/local/image.py
@@ -759,7 +759,9 @@ def _create_docker_host(self, host, environment, optml_subdirs, command, volumes
-        # for GPU support pass in nvidia as the runtime, this is equivalent
-        # to setting --runtime=nvidia in the docker commandline.
+        # for GPU support, request a GPU device reservation in the compose file;
+        # this replaces the legacy --runtime=nvidia docker option.
         if self.instance_type == "local_gpu":
-            host_config["runtime"] = "nvidia"
+            host_config["deploy"] = {
+                "resources": {"reservations": {"devices": [{"capabilities": ["gpu"]}]}}
+            }
 
         if command == "serve":
             serving_port = (
diff --git a/tests/unit/test_image.py b/tests/unit/test_image.py
index 020f648834..e7bde99610 100644
--- a/tests/unit/test_image.py
+++ b/tests/unit/test_image.py
@@ -574,8 +574,10 @@ def test_container_has_gpu_support(tmpdir, sagemaker_session):
     )
 
     docker_host = sagemaker_container._create_docker_host("host-1", {}, set(), "train", [])
-    assert "runtime" in docker_host
-    assert docker_host["runtime"] == "nvidia"
+    assert "deploy" in docker_host
+    assert docker_host["deploy"] == {
+        "resources": {"reservations": {"devices": [{"capabilities": ["gpu"]}]}}
+    }
 
 
 def test_container_does_not_enable_nvidia_docker_for_cpu_containers(sagemaker_session):
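
Note (not part of the patch): a minimal sketch of what the new host_config entry renders to in the generated docker-compose file, assuming the local-mode compose generation still dumps each host's config with PyYAML; the service name "host-1" is only illustrative.

import yaml

# host_config entry produced for instance_type == "local_gpu" after this change;
# the "deploy" block replaces the legacy 'runtime: nvidia' key in the service.
host_config = {
    "deploy": {
        "resources": {"reservations": {"devices": [{"capabilities": ["gpu"]}]}}
    }
}

print(yaml.safe_dump({"services": {"host-1": host_config}}, default_flow_style=False))
# Expected output (compose-spec GPU device reservation):
# services:
#   host-1:
#     deploy:
#       resources:
#         reservations:
#           devices:
#           - capabilities:
#             - gpu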