
Commit 9bb211a

Fixed flake errors
1 parent 900b406 commit 9bb211a
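
The commit message does not list the specific errors, but the edits below line up with standard flake8 checks: an unused import (F401), missing whitespace around the assignment operator (E225), and surplus blank lines (E303/E302). A minimal sketch of how the check could be re-run locally, assuming flake8 is installed; the repository's actual lint configuration (e.g. a tox.ini or setup.cfg) is not shown in this commit:

    # Re-runs the linter over the unit tests; on the pre-fix code it would
    # flag errors of the kind addressed in this commit, for example:
    #   F401  'mock' imported but unused          (test_default_inference_handler.py)
    #   E225  missing whitespace around operator  (ENABLE_MULTI_MODEL=True and similar)
    #   E303  too many blank lines                (extra blank lines in test_model_server.py)
    import subprocess

    subprocess.check_call(["python", "-m", "flake8", "test/unit"])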


4 files changed: +9 -57 lines changed


test/unit/test_default_inference_handler.py (-1)

@@ -15,7 +15,6 @@
 import csv
 import json
 
-import mock
 import numpy as np
 import pytest
 import torch

test/unit/test_handler_service.py (+3, -3)

@@ -31,8 +31,8 @@ def test_hosting_start_enable_multi_model(Transformer, DefaultPytorchInferenceHa
     from sagemaker_pytorch_serving_container import handler_service
 
     context = Mock()
-    context.system_properties.get.return_value ="/"
-    handler_service.ENABLE_MULTI_MODEL=True
+    context.system_properties.get.return_value = "/"
+    handler_service.ENABLE_MULTI_MODEL = True
     handler = handler_service.HandlerService()
     handler.initialize(context)
-    handler_service.ENABLE_MULTI_MODEL=False
+    handler_service.ENABLE_MULTI_MODEL = False
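
This test flips a module-level flag by hand and resets it after the call; the same pattern appears in the torchserve tests below. A minimal sketch of an equivalent arrangement using unittest.mock.patch.object, which restores ENABLE_MULTI_MODEL automatically even if an assertion fails (illustrative only; the test name is made up and this is not the repository's code):

    from unittest.mock import Mock, patch

    from sagemaker_pytorch_serving_container import handler_service


    def test_hosting_start_enable_multi_model_patched():
        context = Mock()
        context.system_properties.get.return_value = "/"
        # patch.object swaps the flag in for the duration of the block and
        # puts the original value back on exit, so other tests are unaffected.
        with patch.object(handler_service, "ENABLE_MULTI_MODEL", True):
            handler = handler_service.HandlerService()
            handler.initialize(context)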

test/unit/test_model_server.py (+5, -53)

@@ -10,6 +10,7 @@
 # distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
+from __future__ import absolute_import
 import os
 import signal
 import subprocess
@@ -86,11 +87,9 @@ def test_start_torchserve_default_service_handler_multi_model(
     subprocess_popen,
     subprocess_call,
 ):
-    torchserve.ENABLE_MULTI_MODEL=True
+    torchserve.ENABLE_MULTI_MODEL = True
     torchserve.start_torchserve()
-    torchserve.ENABLE_MULTI_MODEL=False
-
-    #adapt.assert_called_once_with(torchserve.DEFAULT_HANDLER_SERVICE)
+    torchserve.ENABLE_MULTI_MODEL = False
     create_config.assert_called_once_with()
     exists.assert_called_once_with(REQUIREMENTS_PATH)
     install_requirements.assert_called_once_with()
@@ -112,50 +111,6 @@ def test_start_torchserve_default_service_handler_multi_model(
     sigterm.assert_called_once_with(retrieve.return_value)
 
 
-@patch("subprocess.call")
-@patch("subprocess.Popen")
-@patch("sagemaker_pytorch_serving_container.torchserve._retrieve_ts_server_process")
-@patch("sagemaker_pytorch_serving_container.torchserve._add_sigterm_handler")
-@patch("sagemaker_pytorch_serving_container.torchserve._install_requirements")
-@patch("os.path.exists", return_value=True)
-@patch("sagemaker_pytorch_serving_container.torchserve._create_torchserve_config_file")
-@patch("sagemaker_pytorch_serving_container.torchserve._adapt_to_ts_format")
-def test_start_torchserve_default_service_handler(
-    adapt,
-    create_config,
-    exists,
-    install_requirements,
-    sigterm,
-    retrieve,
-    subprocess_popen,
-    subprocess_call,
-):
-    torchserve.start_torchserve()
-
-    adapt.assert_called_once_with(torchserve.DEFAULT_HANDLER_SERVICE)
-    create_config.assert_called_once_with()
-    exists.assert_called_once_with(REQUIREMENTS_PATH)
-    install_requirements.assert_called_once_with()
-
-    ts_model_server_cmd = [
-        "torchserve",
-        "--start",
-        "--model-store",
-        torchserve.MODEL_STORE,
-        "--ts-config",
-        torchserve.TS_CONFIG_FILE,
-        "--log-config",
-        torchserve.DEFAULT_TS_LOG_FILE,
-        "--models",
-        "model.mar"
-    ]
-
-    subprocess_popen.assert_called_once_with(ts_model_server_cmd)
-    sigterm.assert_called_once_with(retrieve.return_value)
-
-
-
-
 @patch("subprocess.call")
 @patch("subprocess.Popen")
 @patch("sagemaker_pytorch_serving_container.torchserve._retrieve_ts_server_process")
@@ -190,7 +145,6 @@ def test_adapt_to_ts_format(path_exists, make_dir, subprocess_check_call, set_py
         torchserve.DEFAULT_TS_MODEL_NAME,
         "--handler",
         handler_service,
-        #importlib.import_module(DEFAULT_TS_HANDLER_SERVICE).__file__,
         "--serialized-file",
         os.path.join(environment.model_dir, torchserve.DEFAULT_TS_MODEL_SERIALIZED_FILE),
         "--export-path",
@@ -288,15 +242,14 @@ def test_generate_ts_config_properties_default_workers(env, read_file):
     assert workers not in ts_config_properties
 
 
-
 @patch("sagemaker_inference.utils.read_file", return_value=DEFAULT_CONFIGURATION)
 @patch("sagemaker_inference.environment.Environment")
 def test_generate_ts_config_properties_multi_model(env, read_file):
     env.return_value.torchserve_workers = None
 
-    torchserve.ENABLE_MULTI_MODEL=True
+    torchserve.ENABLE_MULTI_MODEL = True
     ts_config_properties = torchserve._generate_ts_config_properties()
-    torchserve.ENABLE_MULTI_MODEL=False
+    torchserve.ENABLE_MULTI_MODEL = False
 
     workers = "default_workers_per_model={}".format(None)
 
@@ -306,7 +259,6 @@ def test_generate_ts_config_properties_multi_model(env, read_file):
    assert workers not in ts_config_properties
 
 
-
 @patch("signal.signal")
 def test_add_sigterm_handler(signal_call):
     ts = Mock()

test/unit/test_serving.py (+1)

@@ -22,6 +22,7 @@ def test_hosting_start(start_torchserve):
     serving.main()
     start_torchserve.assert_called()
 
+
 def test_retry_if_error():
     from sagemaker_pytorch_serving_container import serving
     serving._retry_if_error(Exception)
