Skip to content

Update TS Archiver to v0.3.1 / Integ tests #95

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 15 commits into the base branch from the source branch
Mar 17, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ def read(fname):
# different packages for different variants
install_requires=['numpy', 'retrying', 'sagemaker-inference>=1.3.1'],
extras_require={
'test': ['boto3==1.10.32', 'coverage==4.5.3', 'docker-compose==1.23.2', 'flake8==3.7.7', 'Flask==1.1.1',
'test': ['boto3>=1.10.44', 'coverage==4.5.3', 'docker-compose==1.23.2', 'flake8==3.7.7', 'Flask==1.1.1',
'mock==2.0.0', 'pytest==4.4.0', 'pytest-cov==2.7.1', 'pytest-xdist==1.28.0', 'PyYAML==3.10',
'sagemaker==1.56.3', 'sagemaker-containers>=2.5.4', 'six==1.12.0', 'requests==2.20.0',
'requests_mock==1.6.0', 'torch==1.6.0', 'torchvision==0.7.0', 'tox==3.7.0']
Expand Down
7 changes: 2 additions & 5 deletions src/sagemaker_pytorch_serving_container/torchserve.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@
)
DEFAULT_TS_MODEL_DIRECTORY = os.path.join(os.getcwd(), ".sagemaker", "ts", "models")
DEFAULT_TS_MODEL_NAME = "model"
DEFAULT_TS_MODEL_SERIALIZED_FILE = "model.pth"
DEFAULT_TS_CODE_DIR = "code"
DEFAULT_HANDLER_SERVICE = "sagemaker_pytorch_serving_container.handler_service"

Expand Down Expand Up @@ -117,14 +116,12 @@ def _adapt_to_ts_format(handler_service):
DEFAULT_TS_MODEL_NAME,
"--handler",
handler_service,
"--serialized-file",
os.path.join(environment.model_dir, DEFAULT_TS_MODEL_SERIALIZED_FILE),
"--export-path",
DEFAULT_TS_MODEL_DIRECTORY,
"--extra-files",
os.path.join(environment.model_dir, DEFAULT_TS_CODE_DIR, environment.Environment().module_name + ".py"),
"--version",
"1",
"--extra-files",
os.path.join(environment.model_dir)
]

logger.info(model_archiver_cmd)
Expand Down
2 changes: 2 additions & 0 deletions test/container/1.6.0/Dockerfile.dlc.cpu
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
ARG region
FROM 763104351884.dkr.ecr.$region.amazonaws.com/pytorch-inference:1.6.0-cpu-py3

RUN pip install --upgrade torch-model-archiver==0.3.1

COPY dist/sagemaker_pytorch_inference-*.tar.gz /sagemaker_pytorch_inference.tar.gz
RUN pip install --upgrade --no-cache-dir /sagemaker_pytorch_inference.tar.gz && \
rm /sagemaker_pytorch_inference.tar.gz
2 changes: 2 additions & 0 deletions test/container/1.6.0/Dockerfile.dlc.gpu
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
ARG region
FROM 763104351884.dkr.ecr.$region.amazonaws.com/pytorch-inference:1.6.0-gpu-py3

RUN pip install --upgrade torch-model-archiver==0.3.1

COPY dist/sagemaker_pytorch_inference-*.tar.gz /sagemaker_pytorch_inference.tar.gz
RUN pip install --upgrade --no-cache-dir /sagemaker_pytorch_inference.tar.gz && \
rm /sagemaker_pytorch_inference.tar.gz
5 changes: 3 additions & 2 deletions test/container/1.6.0/Dockerfile.pytorch
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@ FROM pytorch/pytorch:1.6.0-cuda10.1-cudnn7-runtime
LABEL com.amazonaws.sagemaker.capabilities.accept-bind-to-port=true
LABEL com.amazonaws.sagemaker.capabilities.multi-models=true

ARG TS_VERSION=0.1.1
ARG TS_VERSION=0.3.1
ARG TS_ARCHIVER_VERSION=0.3.1

ENV SAGEMAKER_SERVING_MODULE sagemaker_pytorch_serving_container.serving:main
ENV TEMP=/home/model-server/tmp
Expand All @@ -25,7 +26,7 @@ RUN conda install -c conda-forge opencv==4.0.1 \
&& ln -s /opt/conda/bin/pip /usr/local/bin/pip3

RUN pip install torchserve==$TS_VERSION \
&& pip install torch-model-archiver==$TS_VERSION
&& pip install torch-model-archiver==$TS_ARCHIVER_VERSION

COPY dist/sagemaker_pytorch_inference-*.tar.gz /sagemaker_pytorch_inference.tar.gz
RUN pip install --no-cache-dir /sagemaker_pytorch_inference.tar.gz && \
Expand Down
10 changes: 5 additions & 5 deletions test/integration/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,33 +28,33 @@
model_cpu_dir = os.path.join(mnist_path, cpu_sub_dir)
mnist_cpu_script = os.path.join(model_cpu_dir, code_sub_dir, 'mnist.py')
model_cpu_tar = file_utils.make_tarfile(mnist_cpu_script,
os.path.join(model_cpu_dir, "model.pth"),
os.path.join(model_cpu_dir, "torch_model.pth"),
model_cpu_dir,
script_path="code")

model_cpu_1d_dir = os.path.join(model_cpu_dir, '1d')
mnist_1d_script = os.path.join(model_cpu_1d_dir, code_sub_dir, 'mnist_1d.py')
model_cpu_1d_tar = file_utils.make_tarfile(mnist_1d_script,
os.path.join(model_cpu_1d_dir, "model.pth"),
os.path.join(model_cpu_1d_dir, "torch_model.pth"),
model_cpu_1d_dir,
script_path="code")

model_gpu_dir = os.path.join(mnist_path, gpu_sub_dir)
mnist_gpu_script = os.path.join(model_gpu_dir, code_sub_dir, 'mnist.py')
model_gpu_tar = file_utils.make_tarfile(mnist_gpu_script,
os.path.join(model_gpu_dir, "model.pth"),
os.path.join(model_gpu_dir, "torch_model.pth"),
model_gpu_dir,
script_path="code")

model_eia_dir = os.path.join(mnist_path, eia_sub_dir)
mnist_eia_script = os.path.join(model_eia_dir, 'mnist.py')
model_eia_tar = file_utils.make_tarfile(mnist_eia_script,
os.path.join(model_eia_dir, "model.pth"),
os.path.join(model_eia_dir, "torch_model.pth"),
model_eia_dir)

call_model_fn_once_script = os.path.join(model_cpu_dir, code_sub_dir, 'call_model_fn_once.py')
call_model_fn_once_tar = file_utils.make_tarfile(call_model_fn_once_script,
os.path.join(model_cpu_dir, "model.pth"),
os.path.join(model_cpu_dir, "torch_model.pth"),
model_cpu_dir,
"model_call_model_fn_once.tar.gz",
script_path="code")
Expand Down
2 changes: 1 addition & 1 deletion test/resources/mnist/model_cpu/1d/code/mnist_1d.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,6 @@ def forward(self, x):

def model_fn(model_dir):
model = torch.nn.DataParallel(Net())
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
with open(os.path.join(model_dir, 'torch_model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return model
2 changes: 1 addition & 1 deletion test/resources/mnist/model_cpu/code/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,6 @@ def forward(self, x):
def model_fn(model_dir):
logger.info('model_fn')
model = torch.nn.DataParallel(Net())
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
with open(os.path.join(model_dir, 'torch_model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return model
4 changes: 2 additions & 2 deletions test/resources/mnist/model_eia/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,11 +38,11 @@ def model_fn(model_dir):
logger.info('model_fn: Loading model with TorchScript from {}'.format(model_dir))
# Scripted model is serialized with torch.jit.save().
# No need to instantiate model definition then load state_dict
model = torch.jit.load('model.pth')
model = torch.jit.load('torch_model.pth')
return model


def save_model(model, model_dir):
logger.info("Saving the model to {}.".format(model_dir))
path = os.path.join(model_dir, 'model.pth')
path = os.path.join(model_dir, 'torch_model.pth')
torch.jit.save(model, path)
2 changes: 1 addition & 1 deletion test/resources/mnist/model_gpu/code/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,6 @@ def forward(self, x):
def model_fn(model_dir):
logger.info('model_fn')
model = torch.nn.DataParallel(Net())
with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
with open(os.path.join(model_dir, 'torch_model.pth'), 'rb') as f:
model.load_state_dict(torch.load(f))
return model
8 changes: 2 additions & 6 deletions test/unit/test_model_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,16 +145,12 @@ def test_adapt_to_ts_format(path_exists, make_dir, subprocess_check_call, set_py
torchserve.DEFAULT_TS_MODEL_NAME,
"--handler",
handler_service,
"--serialized-file",
os.path.join(environment.model_dir, torchserve.DEFAULT_TS_MODEL_SERIALIZED_FILE),
"--export-path",
torchserve.DEFAULT_TS_MODEL_DIRECTORY,
"--extra-files",
os.path.join(environment.model_dir,
torchserve.DEFAULT_TS_CODE_DIR,
environment.Environment().module_name + ".py"),
"--version",
"1",
"--extra-files",
environment.model_dir
]

subprocess_check_call.assert_called_once_with(model_archiver_cmd)
Expand Down