@@ -174,7 +174,9 @@ def test_auto_ml_v2_attach(problem_type, job_name_fixture_key, sagemaker_session
     assert desc["AutoMLJobName"] == job_name
     assert desc["AutoMLJobStatus"] in ["InProgress", "Completed"]
     assert desc["AutoMLJobSecondaryStatus"] != "Failed"
-    assert desc["ProblemConfig"] == auto_ml_v2_utils.PROBLEM_CONFIGS[problem_type]
+    assert sorted(desc["AutoMLProblemTypeConfig"]) == sorted(
+        auto_ml_v2_utils.PROBLEM_CONFIGS[problem_type]
+    )
     assert desc["OutputDataConfig"] == expected_default_output_config
 
 
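Note on the new assertion: assuming both sides are dicts (boto3's DescribeAutoMLJobV2 response carries AutoMLProblemTypeConfig as a nested dict), sorted() iterates over a dict's keys, so the comparison checks that the two configs expose the same keys rather than identical nested values. A minimal illustration, with made-up config values that are not the SDK's actual fixtures:

    # Hypothetical configs for illustration only.
    expected = {"CompletionCriteria": {"MaxCandidates": 5}, "Mode": "ENSEMBLING"}
    actual = {"Mode": "AUTO", "CompletionCriteria": {"MaxCandidates": 1}}

    # sorted() over a dict yields its sorted keys, so this assertion
    # passes even though the nested values differ.
    assert sorted(actual) == sorted(expected)
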
@@ -251,6 +253,8 @@ def test_list_candidates(
 
         candidates = auto_ml.list_candidates(job_name=job_name)
         assert len(candidates) == num_candidates
+    else:
+        pytest.skip("The job hasn't finished yet")
 
 
 @pytest.mark.skipif(
@@ -320,6 +324,8 @@ def test_best_candidate(
         best_candidate = auto_ml.best_candidate(job_name=job_name)
         assert len(best_candidate["InferenceContainers"]) == num_containers
         assert best_candidate["CandidateStatus"] == "Completed"
+    else:
+        pytest.skip("The job hasn't finished yet")
 
 
 @pytest.mark.skipif(
@@ -411,6 +417,8 @@ def test_deploy_best_candidate(
         )["EndpointStatus"]
         assert endpoint_status == "InService"
         sagemaker_session.sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
+    else:
+        pytest.skip("The job hasn't finished yet")
 
 
 @pytest.mark.skipif(
@@ -482,3 +490,5 @@ def test_candidate_estimator_get_steps(
         candidate_estimator = CandidateEstimator(candidate, sagemaker_session)
         steps = candidate_estimator.get_steps()
         assert len(steps) == num_steps
+    else:
+        pytest.skip("The job hasn't finished yet")
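Each of the tests above now guards its assertions on job completion: when the AutoML job is still running, pytest.skip marks the test as skipped rather than failing on a job that simply hasn't finished. A minimal sketch of the pattern, where the job_finished helper is illustrative and stands in for inspecting the SDK's describe output:

    import pytest

    def job_finished(status):
        # Stand-in for checking the AutoMLJobStatus field of a describe call.
        return status in ("Completed", "Stopped", "Failed")

    def test_candidates_pattern():
        status = "InProgress"  # would come from describe_auto_ml_job() in practice
        if job_finished(status):
            pass  # run the real assertions against the finished job here
        else:
            # Report the test as skipped instead of failed.
            pytest.skip("The job hasn't finished yet")
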