@@ -174,7 +174,10 @@ def test_auto_ml_v2_attach(problem_type, job_name_fixture_key, sagemaker_session
     assert desc["AutoMLJobName"] == job_name
     assert desc["AutoMLJobStatus"] in ["InProgress", "Completed"]
     assert desc["AutoMLJobSecondaryStatus"] != "Failed"
-    assert desc["ProblemConfig"] == auto_ml_v2_utils.PROBLEM_CONFIGS[problem_type]
+    assert (
+        desc["AutoMLProblemTypeConfig"]
+        == auto_ml_v2_utils.PROBLEM_CONFIGS[problem_type].to_request_dict()
+    )
     assert desc["OutputDataConfig"] == expected_default_output_config
 
 
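Note on the hunk above: DescribeAutoMLJobV2 returns the problem-type configuration under the AutoMLProblemTypeConfig key as a plain request dict, while auto_ml_v2_utils.PROBLEM_CONFIGS holds SDK config objects, so the expected config is serialized with to_request_dict() before comparing. A minimal sketch of that comparison, using a hypothetical TabularJobConfig stand-in rather than the SDK's real config classes:

from dataclasses import dataclass


# Hypothetical stand-in for the SDK's problem-type config objects; only the
# to_request_dict() contract matters for the comparison in the hunk above.
@dataclass
class TabularJobConfig:
    target_attribute_name: str

    def to_request_dict(self) -> dict:
        # Assumed request shape; the real SDK emits the API's wire format.
        return {"TabularJobConfig": {"TargetAttributeName": self.target_attribute_name}}


expected = TabularJobConfig(target_attribute_name="label")
desc = {"AutoMLProblemTypeConfig": {"TabularJobConfig": {"TargetAttributeName": "label"}}}

# The pattern the hunk introduces: describe-response dict vs. serialized config object.
assert desc["AutoMLProblemTypeConfig"] == expected.to_request_dict()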
@@ -251,6 +254,8 @@ def test_list_candidates(
 
         candidates = auto_ml.list_candidates(job_name=job_name)
         assert len(candidates) == num_candidates
+    else:
+        pytest.skip("The job hasn't finished yet")
 
 
 @pytest.mark.skipif(
@@ -320,6 +325,8 @@ def test_best_candidate(
         best_candidate = auto_ml.best_candidate(job_name=job_name)
         assert len(best_candidate["InferenceContainers"]) == num_containers
         assert best_candidate["CandidateStatus"] == "Completed"
+    else:
+        pytest.skip("The job hasn't finished yet")
 
 
 @pytest.mark.skipif(
@@ -411,6 +418,8 @@ def test_deploy_best_candidate(
         )["EndpointStatus"]
         assert endpoint_status == "InService"
         sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
+    else:
+        pytest.skip("The job hasn't finished yet")
 
 
 @pytest.mark.skipif(
@@ -482,3 +491,5 @@ def test_candidate_estimator_get_steps(
         candidate_estimator = CandidateEstimator(candidate, sagemaker_session)
         steps = candidate_estimator.get_steps()
         assert len(steps) == num_steps
+    else:
+        pytest.skip("The job hasn't finished yet")
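The remaining hunks all add the same guard: each test only asserts on candidates, containers, or endpoints once the shared AutoML job has finished, and otherwise skips instead of failing. A minimal sketch of the pattern, with illustrative names (job_finished, run_candidate_checks) that are not from the source:

import pytest


def run_candidate_checks(job_finished: bool) -> None:
    if job_finished:
        # The real tests run their candidate/endpoint assertions here.
        pass
    else:
        # pytest.skip raises pytest's Skipped exception, so the test is reported
        # as skipped rather than failed while the long-running job is in progress.
        pytest.skip("The job hasn't finished yet")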