diff --git a/tests/integ/__init__.py b/tests/integ/__init__.py
index 3243b27537..e8ed234689 100644
--- a/tests/integ/__init__.py
+++ b/tests/integ/__init__.py
@@ -23,9 +23,11 @@
 TUNING_DEFAULT_TIMEOUT_MINUTES = 20
 TRANSFORM_DEFAULT_TIMEOUT_MINUTES = 20
 PYTHON_VERSION = 'py' + str(sys.version_info.major)
-HOSTING_P2_UNAVAILABLE_REGIONS = ['ca-central-1', 'us-west-1', 'eu-west-2']
-HOSTING_P3_UNAVAILABLE_REGIONS = ['ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'ca-central-1',
-                                  'eu-west-2', 'us-west-1']
+
+# 'eu-central-1' has some p2, but not enough for continuous testing
+HOSTING_NO_P2_REGIONS = ['ca-central-1', 'eu-west-2', 'us-west-1', 'eu-central-1']
+HOSTING_NO_P3_REGIONS = ['ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'ca-central-1',
+                         'eu-west-2', 'us-west-1']
 
 logging.getLogger('boto3').setLevel(logging.INFO)
 logging.getLogger('botocore').setLevel(logging.INFO)
diff --git a/tests/integ/test_byo_estimator.py b/tests/integ/test_byo_estimator.py
index 40e97e9321..fac7863787 100644
--- a/tests/integ/test_byo_estimator.py
+++ b/tests/integ/test_byo_estimator.py
@@ -23,7 +23,7 @@
 import sagemaker
 from sagemaker.amazon.amazon_estimator import registry
 from sagemaker.estimator import Estimator
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 
@@ -81,7 +81,7 @@ def test_byo_estimator(sagemaker_session, region):
         # training labels must be 'float32'
         estimator.fit({'train': s3_train_data})
 
-    endpoint_name = name_from_base('byo')
+    endpoint_name = unique_name_from_base('byo')
 
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         model = estimator.create_model()
@@ -99,7 +99,7 @@
 
 def test_async_byo_estimator(sagemaker_session, region):
     image_name = registry(region) + "/factorization-machines:1"
-    endpoint_name = name_from_base('byo')
+    endpoint_name = unique_name_from_base('byo')
     training_data_path = os.path.join(DATA_DIR, 'dummy_tensor')
     training_job_name = ""
 
diff --git a/tests/integ/test_chainer_train.py b/tests/integ/test_chainer_train.py
index 73e7ff8da2..80b5349c84 100644
--- a/tests/integ/test_chainer_train.py
+++ b/tests/integ/test_chainer_train.py
@@ -36,8 +36,8 @@ def test_distributed_cpu_training(sagemaker_session, chainer_full_version):
     _run_mnist_training_job(sagemaker_session, "ml.c4.xlarge", 2, chainer_full_version)
 
 
-@pytest.mark.skipif(tests.integ.test_region() in ['us-west-1', 'eu-west-2', 'ca-central-1'],
-                    reason='No ml.p2.xlarge supported in these regions')
+@pytest.mark.skipif(tests.integ.test_region() in tests.integ.HOSTING_NO_P2_REGIONS,
+                    reason='no ml.p2 instances in these regions')
 def test_distributed_gpu_training(sagemaker_session, chainer_full_version):
     _run_mnist_training_job(sagemaker_session, "ml.p2.xlarge", 2, chainer_full_version)
 
diff --git a/tests/integ/test_factorization_machines.py b/tests/integ/test_factorization_machines.py
index 600fadced0..76de74bc90 100644
--- a/tests/integ/test_factorization_machines.py
+++ b/tests/integ/test_factorization_machines.py
@@ -21,7 +21,7 @@
 import pytest
 
 from sagemaker import FactorizationMachines, FactorizationMachinesModel
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 
@@ -45,7 +45,7 @@ def test_factorization_machines(sagemaker_session):
         # training labels must be 'float32'
         fm.fit(fm.record_set(train_set[0][:200], train_set[1][:200].astype('float32')))
 
-    endpoint_name = name_from_base('fm')
+    endpoint_name = unique_name_from_base('fm')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         model = FactorizationMachinesModel(fm.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
         predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
@@ -58,7 +58,7 @@ def test_factorization_machines(sagemaker_session):
 
 def test_async_factorization_machines(sagemaker_session):
     training_job_name = ""
-    endpoint_name = name_from_base('factorizationMachines')
+    endpoint_name = unique_name_from_base('factorizationMachines')
 
     with timeout(minutes=5):
         data_path = os.path.join(DATA_DIR, 'one_p_mnist', 'mnist.pkl.gz')
diff --git a/tests/integ/test_ipinsights.py b/tests/integ/test_ipinsights.py
index a512899194..a93c8816fe 100644
--- a/tests/integ/test_ipinsights.py
+++ b/tests/integ/test_ipinsights.py
@@ -17,7 +17,7 @@
 
 from sagemaker import IPInsights, IPInsightsModel
 from sagemaker.predictor import RealTimePredictor
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.record_set import prepare_record_set_from_local_files
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
@@ -47,7 +47,7 @@ def test_ipinsights(sagemaker_session):
                                                          num_records, FEATURE_DIM, sagemaker_session)
         ipinsights.fit(record_set, None)
 
-    endpoint_name = name_from_base('ipinsights')
+    endpoint_name = unique_name_from_base('ipinsights')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         model = IPInsightsModel(ipinsights.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
         predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
diff --git a/tests/integ/test_kmeans.py b/tests/integ/test_kmeans.py
index 6cbef96036..85f6b247e8 100644
--- a/tests/integ/test_kmeans.py
+++ b/tests/integ/test_kmeans.py
@@ -21,7 +21,7 @@
 import pytest
 
 from sagemaker import KMeans, KMeansModel
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 
@@ -64,7 +64,7 @@ def test_kmeans(sagemaker_session):
 
         kmeans.fit(kmeans.record_set(train_set[0][:100]))
 
-    endpoint_name = name_from_base('kmeans')
+    endpoint_name = unique_name_from_base('kmeans')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         model = KMeansModel(kmeans.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
         predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
@@ -78,7 +78,7 @@ def test_kmeans(sagemaker_session):
 
 def test_async_kmeans(sagemaker_session):
     training_job_name = ""
-    endpoint_name = name_from_base('kmeans')
+    endpoint_name = unique_name_from_base('kmeans')
 
     with timeout(minutes=5):
         data_path = os.path.join(DATA_DIR, 'one_p_mnist', 'mnist.pkl.gz')
diff --git a/tests/integ/test_knn.py b/tests/integ/test_knn.py
index d4655f4b5c..4f4e3ad47a 100644
--- a/tests/integ/test_knn.py
+++ b/tests/integ/test_knn.py
@@ -21,7 +21,7 @@
 import pytest
 
 from sagemaker import KNN, KNNModel
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 
@@ -44,7 +44,7 @@ def test_knn_regressor(sagemaker_session):
         # training labels must be 'float32'
         knn.fit(knn.record_set(train_set[0][:200], train_set[1][:200].astype('float32')))
 
-    endpoint_name = name_from_base('knn')
+    endpoint_name = unique_name_from_base('knn')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         model = KNNModel(knn.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
         predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
@@ -57,7 +57,7 @@ def test_knn_regressor(sagemaker_session):
 
 def test_async_knn_classifier(sagemaker_session):
     training_job_name = ""
-    endpoint_name = name_from_base('knn')
+    endpoint_name = unique_name_from_base('knn')
 
     with timeout(minutes=5):
         data_path = os.path.join(DATA_DIR, 'one_p_mnist', 'mnist.pkl.gz')
diff --git a/tests/integ/test_lda.py b/tests/integ/test_lda.py
index 7a198d8c8b..1553436420 100644
--- a/tests/integ/test_lda.py
+++ b/tests/integ/test_lda.py
@@ -19,7 +19,7 @@
 
 from sagemaker import LDA, LDAModel
 from sagemaker.amazon.common import read_records
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 from tests.integ.record_set import prepare_record_set_from_local_files
@@ -44,7 +44,7 @@ def test_lda(sagemaker_session):
                                                          len(all_records), feature_num, sagemaker_session)
         lda.fit(record_set, 100)
 
-    endpoint_name = name_from_base('lda')
+    endpoint_name = unique_name_from_base('lda')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         model = LDAModel(lda.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
         predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
diff --git a/tests/integ/test_linear_learner.py b/tests/integ/test_linear_learner.py
index d5d94fe8a8..bd049ee8c4 100644
--- a/tests/integ/test_linear_learner.py
+++ b/tests/integ/test_linear_learner.py
@@ -22,7 +22,7 @@
 import pytest
 
 from sagemaker.amazon.linear_learner import LinearLearner, LinearLearnerModel
-from sagemaker.utils import name_from_base, sagemaker_timestamp
+from sagemaker.utils import unique_name_from_base, sagemaker_timestamp
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 
@@ -80,7 +80,7 @@ def test_linear_learner(sagemaker_session):
         ll.early_stopping_patience = 3
         ll.fit(ll.record_set(train_set[0][:200], train_set[1][:200]))
 
-    endpoint_name = name_from_base('linear-learner')
+    endpoint_name = unique_name_from_base('linear-learner')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         predictor = ll.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
 
@@ -109,7 +109,7 @@ def test_linear_learner_multiclass(sagemaker_session):
         ll.epochs = 1
         ll.fit(ll.record_set(train_set[0][:200], train_set[1][:200]))
 
-    endpoint_name = name_from_base('linear-learner')
+    endpoint_name = unique_name_from_base('linear-learner')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         predictor = ll.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
 
diff --git a/tests/integ/test_ntm.py b/tests/integ/test_ntm.py
index eeb56c2cf0..bc50b4e30b 100644
--- a/tests/integ/test_ntm.py
+++ b/tests/integ/test_ntm.py
@@ -19,7 +19,7 @@
 
 from sagemaker import NTM, NTMModel
 from sagemaker.amazon.common import read_records
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 from tests.integ.record_set import prepare_record_set_from_local_files
@@ -44,7 +44,7 @@ def test_ntm(sagemaker_session):
                                                          len(all_records), feature_num, sagemaker_session)
         ntm.fit(record_set, None)
 
-    endpoint_name = name_from_base('ntm')
+    endpoint_name = unique_name_from_base('ntm')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         model = NTMModel(ntm.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
         predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
diff --git a/tests/integ/test_object2vec.py b/tests/integ/test_object2vec.py
index 3693591714..1ed32abc89 100644
--- a/tests/integ/test_object2vec.py
+++ b/tests/integ/test_object2vec.py
@@ -17,7 +17,7 @@
 
 from sagemaker.predictor import RealTimePredictor
 from sagemaker import Object2Vec, Object2VecModel
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 from tests.integ.record_set import prepare_record_set_from_local_files
@@ -52,7 +52,7 @@ def test_object2vec(sagemaker_session):
 
         object2vec.fit(record_set, None)
 
-    endpoint_name = name_from_base('object2vec')
+    endpoint_name = unique_name_from_base('object2vec')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         model = Object2VecModel(object2vec.model_data, role='SageMakerRole',
                                 sagemaker_session=sagemaker_session)
diff --git a/tests/integ/test_pca.py b/tests/integ/test_pca.py
index 76a17eb595..68b9acf631 100644
--- a/tests/integ/test_pca.py
+++ b/tests/integ/test_pca.py
@@ -21,7 +21,7 @@
 import pytest
 
 import sagemaker.amazon.pca
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import DATA_DIR, TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 
@@ -45,7 +45,7 @@ def test_pca(sagemaker_session):
         pca.extra_components = 5
         pca.fit(pca.record_set(train_set[0][:100]))
 
-    endpoint_name = name_from_base('pca')
+    endpoint_name = unique_name_from_base('pca')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         pca_model = sagemaker.amazon.pca.PCAModel(model_data=pca.model_data, role='SageMakerRole',
                                                   sagemaker_session=sagemaker_session)
@@ -61,7 +61,7 @@ def test_pca(sagemaker_session):
 
 def test_async_pca(sagemaker_session):
     training_job_name = ""
-    endpoint_name = name_from_base('pca')
+    endpoint_name = unique_name_from_base('pca')
 
     with timeout(minutes=5):
         data_path = os.path.join(DATA_DIR, 'one_p_mnist', 'mnist.pkl.gz')
diff --git a/tests/integ/test_pytorch_train.py b/tests/integ/test_pytorch_train.py
index 85cbcf6978..6139b3396a 100644
--- a/tests/integ/test_pytorch_train.py
+++ b/tests/integ/test_pytorch_train.py
@@ -75,8 +75,8 @@ def test_deploy_model(pytorch_training_job, sagemaker_session):
         assert output.shape == (batch_size, 10)
 
 
-@pytest.mark.skipif(tests.integ.test_region() in ['us-west-1', 'eu-west-2', 'ca-central-1'],
-                    reason='No ml.p2.xlarge supported in these regions')
+@pytest.mark.skipif(tests.integ.test_region() in tests.integ.HOSTING_NO_P2_REGIONS,
+                    reason='no ml.p2 instances in these regions')
 def test_async_fit_deploy(sagemaker_session, pytorch_full_version):
     training_job_name = ""
     # TODO: add tests against local mode when it's ready to be used
diff --git a/tests/integ/test_randomcutforest.py b/tests/integ/test_randomcutforest.py
index c01d18aaa8..bbcea51462 100644
--- a/tests/integ/test_randomcutforest.py
+++ b/tests/integ/test_randomcutforest.py
@@ -16,7 +16,7 @@
 import pytest
 
 from sagemaker import RandomCutForest, RandomCutForestModel
-from sagemaker.utils import name_from_base
+from sagemaker.utils import unique_name_from_base
 from tests.integ import TRAINING_DEFAULT_TIMEOUT_MINUTES
 from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
 
@@ -34,7 +34,7 @@ def test_randomcutforest(sagemaker_session):
 
         rcf.fit(rcf.record_set(train_input))
 
-    endpoint_name = name_from_base('randomcutforest')
+    endpoint_name = unique_name_from_base('randomcutforest')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
         model = RandomCutForestModel(rcf.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
         predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
diff --git a/tests/integ/test_tf_cifar.py b/tests/integ/test_tf_cifar.py
index 4a9805b8b7..3046675be3 100644
--- a/tests/integ/test_tf_cifar.py
+++ b/tests/integ/test_tf_cifar.py
@@ -37,8 +37,8 @@ def __call__(self, data):
 @pytest.mark.continuous_testing
 @pytest.mark.skipif(tests.integ.PYTHON_VERSION != 'py2',
                     reason="TensorFlow image supports only python 2.")
-@pytest.mark.skipif(tests.integ.test_region() in ['us-west-1', 'eu-west-2', 'ca-central-1'],
-                    reason='No ml.p2.xlarge supported in these regions')
+@pytest.mark.skipif(tests.integ.test_region() in tests.integ.HOSTING_NO_P2_REGIONS,
+                    reason='no ml.p2 instances in these regions')
 def test_cifar(sagemaker_session, tf_full_version):
     with timeout(minutes=45):
         script_path = os.path.join(tests.integ.DATA_DIR, 'cifar_10', 'source')
diff --git a/tests/integ/test_tf_keras.py b/tests/integ/test_tf_keras.py
index 02296bfdd9..6e36a87991 100644
--- a/tests/integ/test_tf_keras.py
+++ b/tests/integ/test_tf_keras.py
@@ -26,8 +26,8 @@
 @pytest.mark.continuous_testing
 @pytest.mark.skipif(tests.integ.PYTHON_VERSION != 'py2',
                     reason="TensorFlow image supports only python 2.")
-@pytest.mark.skipif(tests.integ.test_region() in ['us-west-1', 'eu-west-2', 'ca-central-1'],
-                    reason='No ml.p2.xlarge supported in these regions')
+@pytest.mark.skipif(tests.integ.test_region() in tests.integ.HOSTING_NO_P2_REGIONS,
+                    reason='no ml.p2 instances in these regions')
 def test_keras(sagemaker_session, tf_full_version):
     script_path = os.path.join(tests.integ.DATA_DIR, 'cifar_10', 'source')
     dataset_path = os.path.join(tests.integ.DATA_DIR, 'cifar_10', 'data')
diff --git a/tests/integ/test_tfs.py b/tests/integ/test_tfs.py
index fa1c214f13..614a4c9240 100644
--- a/tests/integ/test_tfs.py
+++ b/tests/integ/test_tfs.py
@@ -26,7 +26,7 @@
     'ml.c5.xlarge',
     pytest.param('ml.p3.2xlarge',
                  marks=pytest.mark.skipif(
-                     tests.integ.test_region() in tests.integ.HOSTING_P3_UNAVAILABLE_REGIONS,
+                     tests.integ.test_region() in tests.integ.HOSTING_NO_P3_REGIONS,
                      reason='no ml.p3 instances in this region'))])
 def instance_type(request):
     return request.param
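Note on the `name_from_base` to `unique_name_from_base` switch: a purely timestamp-based name can collide when two test runs create an endpoint with the same base at nearly the same moment, while `unique_name_from_base` also mixes in a random suffix. The snippet below is only an illustrative sketch of that naming scheme; the helper name `unique_name_from_base_sketch`, the 63-character cap, and the exact suffix format are assumptions for illustration, not the SDK's actual implementation.

```python
import random
import time


def unique_name_from_base_sketch(base, max_length=63):
    """Illustrative sketch (not the SDK code): append a timestamp and a
    short random suffix to ``base`` so concurrent runs cannot collide."""
    suffix = '%04x' % random.randrange(16 ** 4)  # 4 random hex characters
    ts = str(int(time.time()))
    # reserve room for the two '-' separators within the length cap
    available = max_length - len(ts) - len(suffix) - 2
    return '{}-{}-{}'.format(base[:available], ts, suffix)


print(unique_name_from_base_sketch('byo'))  # e.g. 'byo-1545082460-3f2a'
print(unique_name_from_base_sketch('byo'))  # same second, different suffix
```

Because every endpoint name in these integration tests now gets its own random component, parallel or back-to-back runs in the same region no longer race for the same SageMaker endpoint name.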