@@ -45,6 +45,12 @@ def test_sync_fit_deploy(pytorch_training_job, sagemaker_session):
45
45
data = numpy .zeros (shape = (1 , 1 , 28 , 28 ))
46
46
predictor .predict (data )
47
47
48
+ batch_size = 100
49
+ data = numpy.random.rand(batch_size, 1, 28, 28)
50
+ output = predictor .predict (data )
51
+
52
+ assert numpy .asarray (output ).shape == (batch_size , 10 )
53
+
48
54
49
55
def test_deploy_model (pytorch_training_job , sagemaker_session ):
50
56
endpoint_name = 'test-pytorch-deploy-model-{}' .format (sagemaker_timestamp ())
@@ -55,8 +61,11 @@ def test_deploy_model(pytorch_training_job, sagemaker_session):
55
61
model = PyTorchModel (model_data , 'SageMakerRole' , entry_point = MNIST_SCRIPT , sagemaker_session = sagemaker_session )
56
62
predictor = model .deploy (1 , 'ml.m4.xlarge' , endpoint_name = endpoint_name )
57
63
58
- data = numpy .zeros (shape = (1 , 1 , 28 , 28 ))
59
- predictor .predict (data )
64
+ batch_size = 100
65
+ data = numpy.random.rand(batch_size, 1, 28, 28)
66
+ output = predictor .predict (data )
67
+
68
+ assert numpy .asarray (output ).shape == (batch_size , 10 )
60
69
61
70
62
71
def test_async_fit_deploy (sagemaker_session , pytorch_full_version ):
@@ -80,8 +89,12 @@ def test_async_fit_deploy(sagemaker_session, pytorch_full_version):
80
89
print ("Re-attaching now to: %s" % training_job_name )
81
90
estimator = PyTorch .attach (training_job_name = training_job_name , sagemaker_session = sagemaker_session )
82
91
predictor = estimator .deploy (1 , instance_type , endpoint_name = endpoint_name )
83
- data = numpy .zeros (shape = (1 , 1 , 28 , 28 ))
84
- predictor .predict (data )
92
+
93
+ batch_size = 100
94
+ data = numpy.random.rand(batch_size, 1, 28, 28)
95
+ output = predictor .predict (data )
96
+
97
+ assert numpy .asarray (output ).shape == (batch_size , 10 )
85
98
86
99
87
100
# TODO(nadiaya): Run against local mode when errors will be propagated
0 commit comments