27
27
28
28
# Health-check endpoint exposed by the serving container.
PING_URL = "http://localhost:8080/ping"
# Per-model invocation endpoint; format with the model's name.
INVOCATION_URL = "http://localhost:8080/models/{}/invoke"

# The two toy TF-Serving models exercised by these tests:
# half_plus_three computes x / 2 + 3, half_plus_two computes x / 2 + 2.
MODEL_NAME_1 = "half_plus_three"
MODEL_NAME_2 = "half_plus_two"
33
@pytest .fixture (scope = "session" , autouse = True )
34
34
def volume ():
@@ -74,14 +74,21 @@ def container(docker_base_name, tag, runtime_config):
74
74
75
75
76
76
@pytest.fixture
def model1():
    """Load the half_plus_three model into the server and return its name."""
    payload = json.dumps(
        {
            "model_name": MODEL_NAME_1,
            "url": "/opt/ml/models/half_plus_three/model/half_plus_three",
        }
    )
    make_load_model_request(payload)
    return MODEL_NAME_1
85
@pytest.fixture
def model2():
    """Load the half_plus_two model into the server and return its name.

    NOTE(review): the original definition lacked the ``@pytest.fixture``
    decorator, so any test requesting ``model2`` as a fixture parameter
    (e.g. ``test_predict_json``) would fail with "fixture 'model2' not
    found". Decorated here to match the sibling ``model1`` fixture.
    """
    model_data = {
        "model_name": MODEL_NAME_2,
        "url": "/opt/ml/models/half_plus_two/model/half_plus_two",
    }
    make_load_model_request(json.dumps(model_data))
    return MODEL_NAME_2
85
92
86
93
@pytest .mark .skip_gpu
87
94
def test_ping_service ():
@@ -90,20 +97,24 @@ def test_ping_service():
90
97
91
98
92
99
@pytest.mark.skip_gpu
def test_predict_json(model1, model2):
    """Invoke both loaded models with a JSON payload and verify predictions.

    half_plus_three computes x / 2 + 3; half_plus_two computes x / 2 + 2.
    """
    headers = make_headers()
    data = "{\"instances\": [1.0, 2.0, 5.0]}"
    response1 = requests.post(INVOCATION_URL.format(model1), data=data, headers=headers).json()
    assert response1 == {"predictions": [3.5, 4.0, 5.5]}
    response2 = requests.post(INVOCATION_URL.format(model2), data=data, headers=headers).json()
    # BUG FIX: the original re-asserted response1 here, so model2's output was
    # never checked (and the assertion could never pass, since response1 holds
    # half_plus_three's predictions).
    assert response2 == {"predictions": [2.5, 3.0, 4.5]}
100
108
@pytest.mark.skip_gpu
def test_zero_content():
    """An empty request body is rejected with HTTP 500 by both models."""
    headers = make_headers()
    empty_body = ""
    for model_name in (MODEL_NAME_1, MODEL_NAME_2):
        response = requests.post(
            INVOCATION_URL.format(model_name), data=empty_body, headers=headers
        )
        assert response.status_code == 500
        assert "document is empty" in response.text
108
119
109
120
@pytest .mark .skip_gpu
@@ -113,34 +124,46 @@ def test_large_input():
113
124
with open (data_file , "r" ) as file :
114
125
x = file .read ()
115
126
headers = make_headers (content_type = "text/csv" )
116
- response = requests .post (INVOCATION_URL .format (MODEL_NAME ), data = x , headers = headers ).json ()
117
- predictions = response ["predictions" ]
118
- assert len (predictions ) == 753936
127
+ response1 = requests .post (INVOCATION_URL .format (MODEL_NAME_1 ), data = x , headers = headers ).json ()
128
+ predictions1 = response1 ["predictions" ]
129
+ assert len (predictions1 ) == 753936
130
+ response2 = requests .post (INVOCATION_URL .format (MODEL_NAME_2 ), data = x , headers = headers ).json ()
131
+ predictions2 = response2 ["predictions" ]
132
+ assert len (predictions2 ) == 753936
119
133
120
134
121
135
@pytest.mark.skip_gpu
def test_csv_input():
    """CSV payloads are accepted and yield the expected predictions."""
    headers = make_headers(content_type="text/csv")
    data = "1.0,2.0,5.0"
    expected = {
        MODEL_NAME_1: {"predictions": [3.5, 4.0, 5.5]},
        MODEL_NAME_2: {"predictions": [2.5, 3.0, 4.5]},
    }
    for model_name, want in expected.items():
        got = requests.post(
            INVOCATION_URL.format(model_name), data=data, headers=headers
        ).json()
        assert got == want
128
144
129
145
@pytest.mark.skip_gpu
def test_specific_versions():
    """Requests pinned to explicit model versions return correct predictions."""
    for version in ("123", "124"):
        headers = make_headers(content_type="text/csv", version=version)
        data = "1.0,2.0,5.0"
        response1 = requests.post(
            INVOCATION_URL.format(MODEL_NAME_1), data=data, headers=headers
        ).json()
        assert response1 == {"predictions": [3.5, 4.0, 5.5]}
        response2 = requests.post(
            INVOCATION_URL.format(MODEL_NAME_2), data=data, headers=headers
        ).json()
        assert response2 == {"predictions": [2.5, 3.0, 4.5]}
139
159
140
160
@pytest.mark.skip_gpu
def test_unsupported_content_type():
    """Unknown content types are rejected with HTTP 500 by both models."""
    headers = make_headers("unsupported-type", "predict")
    payload = "aW1hZ2UgYnl0ZXM="
    for model_name in (MODEL_NAME_1, MODEL_NAME_2):
        response = requests.post(
            INVOCATION_URL.format(model_name), data=payload, headers=headers
        )
        assert response.status_code == 500
        assert "unsupported content type" in response.text
0 commit comments