
Commit 2f68f4a

fix: load EI model to CPU by default in model_fn (#68)
1 parent eed2f5e · commit 2f68f4a

File tree (1 file changed: +2 -1)

src/sagemaker_pytorch_serving_container/default_inference_handler.py

@@ -39,7 +39,8 @@ def default_model_fn(self, model_dir):
             if not os.path.exists(model_path):
                 raise FileNotFoundError("Failed to load model with default model_fn: missing file {}."
                                         .format(DEFAULT_MODEL_FILENAME))
-            return torch.jit.load(model_path)
+            # Client-framework is CPU only. But model will run in Elastic Inference server with CUDA.
+            return torch.jit.load(model_path, map_location=torch.device('cpu'))
         else:
             raise NotImplementedError(textwrap.dedent("""
                 Please provide a model_fn implementation.
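
For reference, a user-supplied model_fn in an inference script could apply the same pattern when overriding this default. The sketch below is a minimal illustration rather than the toolkit's exact code: the filename "model.pt" stands in for DEFAULT_MODEL_FILENAME, and the error message wording is ours.

import os

import torch


def model_fn(model_dir):
    # The client-side framework is CPU-only; with Elastic Inference, CUDA
    # execution happens on the attached accelerator. Mapping to CPU avoids
    # deserialization failures on hosts that have no local GPU.
    model_path = os.path.join(model_dir, "model.pt")  # assumed artifact name
    if not os.path.exists(model_path):
        raise FileNotFoundError("missing model file: {}".format(model_path))
    return torch.jit.load(model_path, map_location=torch.device("cpu"))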
