From 5b375029931d9a83d0522a1208c9148d332e4db9 Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Mon, 6 Jul 2020 09:58:51 -0700 Subject: [PATCH 1/5] breaking: rename image to image_uri for models --- src/sagemaker/amazon/amazon_estimator.py | 2 +- .../amazon/factorization_machines.py | 4 +- src/sagemaker/amazon/ipinsights.py | 4 +- src/sagemaker/amazon/kmeans.py | 4 +- src/sagemaker/amazon/lda.py | 4 +- src/sagemaker/amazon/linear_learner.py | 4 +- src/sagemaker/amazon/ntm.py | 4 +- src/sagemaker/amazon/object2vec.py | 4 +- src/sagemaker/amazon/pca.py | 4 +- src/sagemaker/amazon/randomcutforest.py | 4 +- src/sagemaker/automl/automl.py | 4 +- src/sagemaker/automl/candidate_estimator.py | 37 ++++----- src/sagemaker/chainer/estimator.py | 6 +- src/sagemaker/chainer/model.py | 16 ++-- src/sagemaker/estimator.py | 31 ++------ src/sagemaker/model.py | 46 +++++------ src/sagemaker/multidatamodel.py | 21 ++--- src/sagemaker/mxnet/estimator.py | 6 +- src/sagemaker/mxnet/model.py | 16 ++-- src/sagemaker/pytorch/estimator.py | 6 +- src/sagemaker/pytorch/model.py | 16 ++-- src/sagemaker/rl/estimator.py | 4 +- src/sagemaker/session.py | 76 ++++++++++--------- src/sagemaker/sklearn/estimator.py | 6 +- src/sagemaker/sklearn/model.py | 16 ++-- src/sagemaker/sparkml/model.py | 6 +- src/sagemaker/tensorflow/estimator.py | 7 +- src/sagemaker/tensorflow/model.py | 30 ++++---- src/sagemaker/tuner.py | 2 +- src/sagemaker/workflow/airflow.py | 22 +++--- src/sagemaker/xgboost/estimator.py | 6 +- src/sagemaker/xgboost/model.py | 10 +-- tests/integ/test_inference_pipeline.py | 6 +- tests/integ/test_multidatamodel.py | 8 +- tests/unit/sagemaker/model/test_neo.py | 2 +- .../sagemaker/tensorflow/test_estimator.py | 4 +- tests/unit/sagemaker/tensorflow/test_tfs.py | 14 ++-- tests/unit/test_airflow.py | 12 +-- tests/unit/test_chainer.py | 4 +- tests/unit/test_endpoint_from_job.py | 4 +- tests/unit/test_endpoint_from_model_data.py | 6 +- 
tests/unit/test_estimator.py | 15 ++-- tests/unit/test_fm.py | 2 +- tests/unit/test_ipinsights.py | 2 +- tests/unit/test_job.py | 2 +- tests/unit/test_kmeans.py | 2 +- tests/unit/test_knn.py | 2 +- tests/unit/test_lda.py | 2 +- tests/unit/test_linear_learner.py | 2 +- tests/unit/test_multidatamodel.py | 16 ++-- tests/unit/test_mxnet.py | 14 ++-- tests/unit/test_ntm.py | 2 +- tests/unit/test_object2vec.py | 2 +- tests/unit/test_pca.py | 2 +- tests/unit/test_pipeline_model.py | 4 +- tests/unit/test_pytorch.py | 8 +- tests/unit/test_randomcutforest.py | 2 +- tests/unit/test_rl.py | 8 +- tests/unit/test_session.py | 28 +++---- tests/unit/test_sklearn.py | 12 +-- tests/unit/test_sparkml_serving.py | 2 +- tests/unit/test_tuner.py | 4 +- tests/unit/test_xgboost.py | 8 +- 63 files changed, 310 insertions(+), 319 deletions(-) diff --git a/src/sagemaker/amazon/amazon_estimator.py b/src/sagemaker/amazon/amazon_estimator.py index 164f006831..5484a0aff8 100644 --- a/src/sagemaker/amazon/amazon_estimator.py +++ b/src/sagemaker/amazon/amazon_estimator.py @@ -156,7 +156,7 @@ class constructor init_params[attribute] = init_params["hyperparameters"][value.name] del init_params["hyperparameters"] - del init_params["image"] + del init_params["image_uri"] return init_params def prepare_workflow_for_training(self, records=None, mini_batch_size=None, job_name=None): diff --git a/src/sagemaker/amazon/factorization_machines.py b/src/sagemaker/amazon/factorization_machines.py index 8c13c5a06a..4a299488ab 100644 --- a/src/sagemaker/amazon/factorization_machines.py +++ b/src/sagemaker/amazon/factorization_machines.py @@ -312,9 +312,9 @@ def __init__(self, model_data, role, sagemaker_session=None, **kwargs): """ sagemaker_session = sagemaker_session or Session() repo = "{}:{}".format(FactorizationMachines.repo_name, FactorizationMachines.repo_version) - image = "{}/{}".format(registry(sagemaker_session.boto_session.region_name), repo) + image_uri = 
"{}/{}".format(registry(sagemaker_session.boto_session.region_name), repo) super(FactorizationMachinesModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=FactorizationMachinesPredictor, diff --git a/src/sagemaker/amazon/ipinsights.py b/src/sagemaker/amazon/ipinsights.py index 789237e60e..a0301122d0 100644 --- a/src/sagemaker/amazon/ipinsights.py +++ b/src/sagemaker/amazon/ipinsights.py @@ -218,12 +218,12 @@ def __init__(self, model_data, role, sagemaker_session=None, **kwargs): """ sagemaker_session = sagemaker_session or Session() repo = "{}:{}".format(IPInsights.repo_name, IPInsights.repo_version) - image = "{}/{}".format( + image_uri = "{}/{}".format( registry(sagemaker_session.boto_session.region_name, IPInsights.repo_name), repo ) super(IPInsightsModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=IPInsightsPredictor, diff --git a/src/sagemaker/amazon/kmeans.py b/src/sagemaker/amazon/kmeans.py index 4c66bd5844..9080f8de68 100644 --- a/src/sagemaker/amazon/kmeans.py +++ b/src/sagemaker/amazon/kmeans.py @@ -243,9 +243,9 @@ def __init__(self, model_data, role, sagemaker_session=None, **kwargs): """ sagemaker_session = sagemaker_session or Session() repo = "{}:{}".format(KMeans.repo_name, KMeans.repo_version) - image = "{}/{}".format(registry(sagemaker_session.boto_session.region_name), repo) + image_uri = "{}/{}".format(registry(sagemaker_session.boto_session.region_name), repo) super(KMeansModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=KMeansPredictor, diff --git a/src/sagemaker/amazon/lda.py b/src/sagemaker/amazon/lda.py index ef111c21be..0563682a99 100644 --- a/src/sagemaker/amazon/lda.py +++ b/src/sagemaker/amazon/lda.py @@ -215,11 +215,11 @@ def __init__(self, model_data, role, sagemaker_session=None, **kwargs): """ sagemaker_session = sagemaker_session or Session() repo = "{}:{}".format(LDA.repo_name, LDA.repo_version) - image = "{}/{}".format( + image_uri = "{}/{}".format( 
registry(sagemaker_session.boto_session.region_name, LDA.repo_name), repo ) super(LDAModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=LDAPredictor, diff --git a/src/sagemaker/amazon/linear_learner.py b/src/sagemaker/amazon/linear_learner.py index 544a48efa9..3f011934f5 100644 --- a/src/sagemaker/amazon/linear_learner.py +++ b/src/sagemaker/amazon/linear_learner.py @@ -476,9 +476,9 @@ def __init__(self, model_data, role, sagemaker_session=None, **kwargs): """ sagemaker_session = sagemaker_session or Session() repo = "{}:{}".format(LinearLearner.repo_name, LinearLearner.repo_version) - image = "{}/{}".format(registry(sagemaker_session.boto_session.region_name), repo) + image_uri = "{}/{}".format(registry(sagemaker_session.boto_session.region_name), repo) super(LinearLearnerModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=LinearLearnerPredictor, diff --git a/src/sagemaker/amazon/ntm.py b/src/sagemaker/amazon/ntm.py index a977b50c1a..3d9a12dfbd 100644 --- a/src/sagemaker/amazon/ntm.py +++ b/src/sagemaker/amazon/ntm.py @@ -245,11 +245,11 @@ def __init__(self, model_data, role, sagemaker_session=None, **kwargs): """ sagemaker_session = sagemaker_session or Session() repo = "{}:{}".format(NTM.repo_name, NTM.repo_version) - image = "{}/{}".format( + image_uri = "{}/{}".format( registry(sagemaker_session.boto_session.region_name, NTM.repo_name), repo ) super(NTMModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=NTMPredictor, diff --git a/src/sagemaker/amazon/object2vec.py b/src/sagemaker/amazon/object2vec.py index 2fcd299365..e095516356 100644 --- a/src/sagemaker/amazon/object2vec.py +++ b/src/sagemaker/amazon/object2vec.py @@ -351,11 +351,11 @@ def __init__(self, model_data, role, sagemaker_session=None, **kwargs): """ sagemaker_session = sagemaker_session or Session() repo = "{}:{}".format(Object2Vec.repo_name, Object2Vec.repo_version) - image = "{}/{}".format( + image_uri = "{}/{}".format( 
registry(sagemaker_session.boto_session.region_name, Object2Vec.repo_name), repo ) super(Object2VecModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=Predictor, diff --git a/src/sagemaker/amazon/pca.py b/src/sagemaker/amazon/pca.py index a53055affc..75601212b3 100644 --- a/src/sagemaker/amazon/pca.py +++ b/src/sagemaker/amazon/pca.py @@ -227,9 +227,9 @@ def __init__(self, model_data, role, sagemaker_session=None, **kwargs): """ sagemaker_session = sagemaker_session or Session() repo = "{}:{}".format(PCA.repo_name, PCA.repo_version) - image = "{}/{}".format(registry(sagemaker_session.boto_session.region_name), repo) + image_uri = "{}/{}".format(registry(sagemaker_session.boto_session.region_name), repo) super(PCAModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=PCAPredictor, diff --git a/src/sagemaker/amazon/randomcutforest.py b/src/sagemaker/amazon/randomcutforest.py index 9855be0671..bcc5898f33 100644 --- a/src/sagemaker/amazon/randomcutforest.py +++ b/src/sagemaker/amazon/randomcutforest.py @@ -206,11 +206,11 @@ def __init__(self, model_data, role, sagemaker_session=None, **kwargs): """ sagemaker_session = sagemaker_session or Session() repo = "{}:{}".format(RandomCutForest.repo_name, RandomCutForest.repo_version) - image = "{}/{}".format( + image_uri = "{}/{}".format( registry(sagemaker_session.boto_session.region_name, RandomCutForest.repo_name), repo ) super(RandomCutForestModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=RandomCutForestPredictor, diff --git a/src/sagemaker/automl/automl.py b/src/sagemaker/automl/automl.py index 8f370ffc99..22cfb14c56 100644 --- a/src/sagemaker/automl/automl.py +++ b/src/sagemaker/automl/automl.py @@ -307,12 +307,12 @@ def create_model( models = [] for container in inference_containers: - image = container["Image"] + image_uri = container["Image"] model_data = container["ModelDataUrl"] env = container["Environment"] model = Model( - image=image, + 
image_uri=image_uri, model_data=model_data, role=self.role, env=env, diff --git a/src/sagemaker/automl/candidate_estimator.py b/src/sagemaker/automl/candidate_estimator.py index 864e2cd070..63a94ac2d5 100644 --- a/src/sagemaker/automl/candidate_estimator.py +++ b/src/sagemaker/automl/candidate_estimator.py @@ -211,24 +211,25 @@ def _get_train_args( Returns (dcit): a dictionary that can be used as args of sagemaker_session.train method. """ - train_args = {} - train_args["input_config"] = inputs - train_args["job_name"] = name - train_args["input_mode"] = desc["AlgorithmSpecification"]["TrainingInputMode"] - train_args["role"] = desc["RoleArn"] - train_args["output_config"] = desc["OutputDataConfig"] - train_args["resource_config"] = desc["ResourceConfig"] - train_args["image"] = desc["AlgorithmSpecification"]["TrainingImage"] - train_args["enable_network_isolation"] = desc["EnableNetworkIsolation"] - train_args["encrypt_inter_container_traffic"] = encrypt_inter_container_traffic - train_args["train_use_spot_instances"] = desc["EnableManagedSpotTraining"] - train_args["hyperparameters"] = {} - train_args["stop_condition"] = {} - train_args["metric_definitions"] = None - train_args["checkpoint_s3_uri"] = None - train_args["checkpoint_local_path"] = None - train_args["tags"] = [] - train_args["vpc_config"] = None + train_args = { + "input_config": inputs, + "job_name": name, + "input_mode": desc["AlgorithmSpecification"]["TrainingInputMode"], + "role": desc["RoleArn"], + "output_config": desc["OutputDataConfig"], + "resource_config": desc["ResourceConfig"], + "image_uri": desc["AlgorithmSpecification"]["TrainingImage"], + "enable_network_isolation": desc["EnableNetworkIsolation"], + "encrypt_inter_container_traffic": encrypt_inter_container_traffic, + "train_use_spot_instances": desc["EnableManagedSpotTraining"], + "hyperparameters": {}, + "stop_condition": {}, + "metric_definitions": None, + "checkpoint_s3_uri": None, + "checkpoint_local_path": None, + "tags": [], + 
"vpc_config": None, + } if volume_kms_key is not None: train_args["resource_config"]["VolumeKmsKeyId"] = volume_kms_key diff --git a/src/sagemaker/chainer/estimator.py b/src/sagemaker/chainer/estimator.py index 4625a3e750..194a762199 100644 --- a/src/sagemaker/chainer/estimator.py +++ b/src/sagemaker/chainer/estimator.py @@ -208,8 +208,8 @@ def create_model( """ kwargs["name"] = self._get_or_create_name(kwargs.get("name")) - if "image" not in kwargs: - kwargs["image"] = self.image_uri + if "image_uri" not in kwargs: + kwargs["image_uri"] = self.image_uri return ChainerModel( self.model_data, @@ -257,7 +257,7 @@ class constructor if value: init_params[argument[len("sagemaker_") :]] = value - image_uri = init_params.pop("image") + image_uri = init_params.pop("image_uri") framework, py_version, tag, _ = framework_name_from_image(image_uri) if tag is None: diff --git a/src/sagemaker/chainer/model.py b/src/sagemaker/chainer/model.py index 8e154092cf..4df66481cb 100644 --- a/src/sagemaker/chainer/model.py +++ b/src/sagemaker/chainer/model.py @@ -64,7 +64,7 @@ def __init__( model_data, role, entry_point, - image=None, + image_uri=None, framework_version=None, py_version=None, predictor_cls=ChainerPredictor, @@ -85,16 +85,16 @@ def __init__( file which should be executed as the entry point to model hosting. If ``source_dir`` is specified, then ``entry_point`` must point to a file located at the root of ``source_dir``. - image (str): A Docker image URI (default: None). If not specified, a + image_uri (str): A Docker image URI (default: None). If not specified, a default image for Chainer will be used. If ``framework_version`` - or ``py_version`` are ``None``, then ``image`` is required. If + or ``py_version`` are ``None``, then ``image_uri`` is required. If also ``None``, then a ``ValueError`` will be raised. framework_version (str): Chainer version you want to use for executing your model training code. Defaults to ``None``. Required - unless ``image`` is provided. 
+ unless ``image_uri`` is provided. py_version (str): Python version you want to use for executing your model training code. Defaults to ``None``. Required unless - ``image`` is provided. + ``image_uri`` is provided. predictor_cls (callable[str, sagemaker.session.Session]): A function to call to create a predictor with an endpoint name and SageMaker ``Session``. If specified, ``deploy()`` returns the @@ -111,7 +111,7 @@ def __init__( :class:`~sagemaker.model.FrameworkModel` and :class:`~sagemaker.model.Model`. """ - validate_version_or_image_args(framework_version, py_version, image) + validate_version_or_image_args(framework_version, py_version, image_uri) if py_version == "py2": logger.warning( python_deprecation_warning(self.__framework_name__, defaults.LATEST_PY2_VERSION) @@ -120,7 +120,7 @@ def __init__( self.py_version = py_version super(ChainerModel, self).__init__( - model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs + model_data, image_uri, role, entry_point, predictor_cls=predictor_cls, **kwargs ) self.model_server_workers = model_server_workers @@ -140,7 +140,7 @@ def prepare_container_def(self, instance_type=None, accelerator_type=None): dict[str, str]: A container definition object usable with the CreateModel API. 
""" - deploy_image = self.image + deploy_image = self.image_uri if not deploy_image: if instance_type is None: raise ValueError( diff --git a/src/sagemaker/estimator.py b/src/sagemaker/estimator.py index 2e7b66c85b..56a62b1ec4 100644 --- a/src/sagemaker/estimator.py +++ b/src/sagemaker/estimator.py @@ -795,7 +795,7 @@ class constructor if "AlgorithmName" in job_details["AlgorithmSpecification"]: init_params["algorithm_arn"] = job_details["AlgorithmSpecification"]["AlgorithmName"] elif "TrainingImage" in job_details["AlgorithmSpecification"]: - init_params["image"] = job_details["AlgorithmSpecification"]["TrainingImage"] + init_params["image_uri"] = job_details["AlgorithmSpecification"]["TrainingImage"] else: raise RuntimeError( "Invalid AlgorithmSpecification. Either TrainingImage or " @@ -1037,7 +1037,7 @@ def start_new(cls, estimator, inputs, experiment_config): if isinstance(estimator, sagemaker.algorithm.AlgorithmEstimator): train_args["algorithm_arn"] = estimator.algorithm_arn else: - train_args["image"] = estimator.train_image() + train_args["image_uri"] = estimator.train_image() if estimator.debugger_rule_configs: train_args["debugger_rule_configs"] = estimator.debugger_rule_configs @@ -1331,7 +1331,7 @@ def hyperparameters(self): def create_model( self, role=None, - image=None, + image_uri=None, predictor_cls=None, serializer=None, deserializer=None, @@ -1350,7 +1350,7 @@ def create_model( role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during transform jobs. If not specified, the role from the Estimator will be used. - image (str): An container image to use for deploying the model. + image_uri (str): A Docker image URI to use for deploying the model. Defaults to the image used for training. predictor_cls (Predictor): The predictor class to use when deploying the model. 
@@ -1393,7 +1393,7 @@ def predict_wrapper(endpoint, session): kwargs["enable_network_isolation"] = self.enable_network_isolation() return Model( - image or self.train_image(), + image_uri or self.train_image(), self.model_data, role, vpc_config=self.get_vpc_config(vpc_config_override), @@ -1402,27 +1402,6 @@ def predict_wrapper(endpoint, session): **kwargs ) - @classmethod - def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None): - """Convert the job description to init params that can be handled by the - class constructor - - Args: - job_details: the returned job details from a describe_training_job - API call. - model_channel_name (str): Name of the channel where pre-trained - model data will be downloaded - - Returns: - dictionary: The transformed init_params - """ - init_params = super(Estimator, cls)._prepare_init_params_from_job_description( - job_details, model_channel_name - ) - - init_params["image_uri"] = init_params.pop("image") - return init_params - class Framework(EstimatorBase): """Base class that cannot be instantiated directly. diff --git a/src/sagemaker/model.py b/src/sagemaker/model.py index 055bbb2716..e030bb3439 100644 --- a/src/sagemaker/model.py +++ b/src/sagemaker/model.py @@ -60,7 +60,7 @@ class Model(object): def __init__( self, - image, + image_uri, model_data=None, role=None, predictor_cls=None, @@ -74,7 +74,7 @@ def __init__( """Initialize an SageMaker ``Model``. Args: - image (str): A Docker image URI. + image_uri (str): A Docker image URI. model_data (str): The S3 location of a SageMaker model data ``.tar.gz`` file (default: None). role (str): An AWS IAM role (either name or full ARN). The Amazon @@ -89,7 +89,7 @@ def __init__( function to call to create a predictor (default: None). If not None, ``deploy`` will return the result of invoking this function on the created endpoint name. 
- env (dict[str, str]): Environment variables to run with ``image`` + env (dict[str, str]): Environment variables to run with ``image_uri`` when hosted in SageMaker (default: None). name (str): The model name. If None, a default model name will be selected on each ``deploy``. @@ -108,10 +108,8 @@ def __init__( model_kms_key (str): KMS key ARN used to encrypt the repacked model archive file if the model is repacked """ - LOGGER.warning(fw_utils.parameter_v2_rename_warning("image", "image_uri")) - self.model_data = model_data - self.image = image + self.image_uri = image_uri self.role = role self.predictor_cls = predictor_cls self.env = env or {} @@ -156,7 +154,7 @@ def prepare_container_def( Returns: dict: A container definition object usable with the CreateModel API. """ - return sagemaker.container_def(self.image, self.model_data, self.env) + return sagemaker.container_def(self.image_uri, self.model_data, self.env) def enable_network_isolation(self): """Whether to enable network isolation when creating this Model @@ -200,10 +198,10 @@ def _create_sagemaker_model(self, instance_type=None, accelerator_type=None, tag tags=tags, ) - def _ensure_base_name_if_needed(self, image): - """Create a base name from the image if there is no model name provided.""" + def _ensure_base_name_if_needed(self, image_uri): + """Create a base name from the image URI if there is no model name provided.""" if self.name is None: - self._base_name = self._base_name or utils.base_name_from_image(image) + self._base_name = self._base_name or utils.base_name_from_image(image_uri) def _set_model_name_if_needed(self): """Generate a new model name if ``self._base_name`` is present.""" @@ -288,7 +286,7 @@ def _neo_image_account(self, region): ) return NEO_IMAGE_ACCOUNT[region] - def _neo_image(self, region, target_instance_type, framework, framework_version): + def _neo_image_uri(self, region, target_instance_type, framework, framework_version): """ Args: region: @@ -305,7 +303,7 @@ def 
_neo_image(self, region, target_instance_type, framework, framework_version) account=self._neo_image_account(region), ) - def _inferentia_image(self, region, target_instance_type, framework, framework_version): + def _inferentia_image_uri(self, region, target_instance_type, framework, framework_version): """ Args: region: @@ -395,7 +393,7 @@ def compile( job_status = self.sagemaker_session.wait_for_compilation_job(job_name) self.model_data = job_status["ModelArtifacts"]["S3ModelArtifacts"] if target_instance_family.startswith("ml_"): - self.image = self._neo_image( + self.image_uri = self._neo_image_uri( self.sagemaker_session.boto_region_name, target_instance_family, framework, @@ -403,7 +401,7 @@ def compile( ) self._is_compiled_model = True elif target_instance_family.startswith(INFERENTIA_INSTANCE_PREFIX): - self.image = self._inferentia_image( + self.image_uri = self._inferentia_image_uri( self.sagemaker_session.boto_region_name, target_instance_family, framework, @@ -484,7 +482,7 @@ def deploy( compiled_model_suffix = "-".join(instance_type.split(".")[:-1]) if self._is_compiled_model: - self._ensure_base_name_if_needed(self.image) + self._ensure_base_name_if_needed(self.image_uri) if self._base_name is not None: self._base_name = "-".join((self._base_name, compiled_model_suffix)) @@ -619,7 +617,7 @@ class FrameworkModel(Model): def __init__( self, model_data, - image, + image_uri, role, entry_point, source_dir=None, @@ -639,7 +637,7 @@ def __init__( Args: model_data (str): The S3 location of a SageMaker model data ``.tar.gz`` file. - image (str): A Docker image URI. + image_uri (str): A Docker image URI. role (str): An IAM role name or ARN for SageMaker to access AWS resources on your behalf. entry_point (str): Path (absolute or relative) to the Python source @@ -680,7 +678,7 @@ def __init__( function to call to create a predictor (default: None). If not None, ``deploy`` will return the result of invoking this function on the created endpoint name. 
- env (dict[str, str]): Environment variables to run with ``image`` + env (dict[str, str]): Environment variables to run with ``image_uri`` when hosted in SageMaker (default: None). name (str): The model name. If None, a default model name will be selected on each ``deploy``. @@ -781,7 +779,7 @@ def __init__( :class:`~sagemaker.model.Model`. """ super(FrameworkModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=predictor_cls, @@ -827,11 +825,13 @@ def prepare_container_def(self, instance_type=None, accelerator_type=None): dict[str, str]: A container definition object usable with the CreateModel API. """ - deploy_key_prefix = fw_utils.model_code_key_prefix(self.key_prefix, self.name, self.image) + deploy_key_prefix = fw_utils.model_code_key_prefix( + self.key_prefix, self.name, self.image_uri + ) self._upload_code(deploy_key_prefix) deploy_env = dict(self.env) deploy_env.update(self._framework_env_vars()) - return sagemaker.container_def(self.image, self.model_data, deploy_env) + return sagemaker.container_def(self.image_uri, self.model_data, deploy_env) def _upload_code(self, key_prefix, repack=False): """ @@ -918,7 +918,9 @@ def __init__(self, role, model_data=None, algorithm_arn=None, model_package_arn= ``model_data`` is not required. **kwargs: Additional kwargs passed to the Model constructor. 
""" - super(ModelPackage, self).__init__(role=role, model_data=model_data, image=None, **kwargs) + super(ModelPackage, self).__init__( + role=role, model_data=model_data, image_uri=None, **kwargs + ) if model_package_arn and algorithm_arn: raise ValueError( diff --git a/src/sagemaker/multidatamodel.py b/src/sagemaker/multidatamodel.py index e06d620df8..bdb1c457c8 100644 --- a/src/sagemaker/multidatamodel.py +++ b/src/sagemaker/multidatamodel.py @@ -35,7 +35,7 @@ def __init__( name, model_data_prefix, model=None, - image=None, + image_uri=None, role=None, sagemaker_session=None, **kwargs @@ -50,9 +50,9 @@ def __init__( model (sagemaker.Model): The Model object that would define the SageMaker model attributes like vpc_config, predictors, etc. If this is present, the attributes from this model are used when - deploying the ``MultiDataModel``. Parameters 'image', 'role' and 'kwargs' + deploying the ``MultiDataModel``. Parameters 'image_uri', 'role' and 'kwargs' are not permitted when model parameter is set. - image (str): A Docker image URI. It can be null if the 'model' parameter + image_uri (str): A Docker image URI. It can be null if the 'model' parameter is passed to during ``MultiDataModel`` initialization (default: None) role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs that create Amazon SageMaker @@ -82,9 +82,10 @@ def __init__( ) ) - if model and (image or role or kwargs): + if model and (image_uri or role or kwargs): raise ValueError( - "Parameters image, role or kwargs are not permitted when model parameter is passed." + "Parameters image_uri, role, and kwargs are not permitted when " + "model parameter is passed." 
) self.name = name @@ -103,7 +104,7 @@ def __init__( # Set the ``Model`` parameters if the model parameter is not specified if not self.model: super(MultiDataModel, self).__init__( - image, + image_uri, self.model_data_prefix, role, name=self.name, @@ -121,18 +122,18 @@ def prepare_container_def(self, instance_type=None, accelerator_type=None): Returns: dict[str, str]: A complete container definition object usable with the CreateModel API """ - # Copy the trained model's image and environment variables if they exist. Models trained + # Copy the trained model's image URI and environment variables if they exist. Models trained # with FrameworkEstimator set framework specific environment variables which need to be # copied over if self.model: container_definition = self.model.prepare_container_def(instance_type, accelerator_type) - image = container_definition["Image"] + image_uri = container_definition["Image"] environment = container_definition["Environment"] else: - image = self.image + image_uri = self.image_uri environment = self.env return sagemaker.container_def( - image, + image_uri, env=environment, model_data_url=self.model_data_prefix, container_mode=self.container_mode, diff --git a/src/sagemaker/mxnet/estimator.py b/src/sagemaker/mxnet/estimator.py index f613327bb3..9e35d683ac 100644 --- a/src/sagemaker/mxnet/estimator.py +++ b/src/sagemaker/mxnet/estimator.py @@ -213,8 +213,8 @@ def create_model( sagemaker.mxnet.model.MXNetModel: A SageMaker ``MXNetModel`` object. See :func:`~sagemaker.mxnet.model.MXNetModel` for full details. 
""" - if "image" not in kwargs: - kwargs["image"] = image_uri or self.image_uri + if "image_uri" not in kwargs: + kwargs["image_uri"] = image_uri or self.image_uri kwargs["name"] = self._get_or_create_name(kwargs.get("name")) @@ -252,7 +252,7 @@ class constructor init_params = super(MXNet, cls)._prepare_init_params_from_job_description( job_details, model_channel_name ) - image_uri = init_params.pop("image") + image_uri = init_params.pop("image_uri") framework, py_version, tag, _ = framework_name_from_image(image_uri) # We switched image tagging scheme from regular image version (e.g. '1.0') to more diff --git a/src/sagemaker/mxnet/model.py b/src/sagemaker/mxnet/model.py index 24cb5a5252..202ea6227e 100644 --- a/src/sagemaker/mxnet/model.py +++ b/src/sagemaker/mxnet/model.py @@ -67,7 +67,7 @@ def __init__( entry_point, framework_version=None, py_version=None, - image=None, + image_uri=None, predictor_cls=MXNetPredictor, model_server_workers=None, **kwargs @@ -88,15 +88,15 @@ def __init__( must point to a file located at the root of ``source_dir``. framework_version (str): MXNet version you want to use for executing your model training code. Defaults to ``None``. Required unless - ``image`` is provided. + ``image_uri`` is provided. py_version (str): Python version you want to use for executing your model training code. Defaults to ``None``. Required unless - ``image`` is provided. - image (str): A Docker image URI (default: None). If not specified, a + ``image_uri`` is provided. + image_uri (str): A Docker image URI (default: None). If not specified, a default image for MXNet will be used. If ``framework_version`` or ``py_version`` are ``None``, then - ``image`` is required. If also ``None``, then a ``ValueError`` + ``image_uri`` is required. If also ``None``, then a ``ValueError`` will be raised. 
predictor_cls (callable[str, sagemaker.session.Session]): A function to call to create a predictor with an endpoint name and @@ -114,7 +114,7 @@ def __init__( :class:`~sagemaker.model.FrameworkModel` and :class:`~sagemaker.model.Model`. """ - validate_version_or_image_args(framework_version, py_version, image) + validate_version_or_image_args(framework_version, py_version, image_uri) if py_version == "py2": logger.warning( python_deprecation_warning(self.__framework_name__, defaults.LATEST_PY2_VERSION) @@ -123,7 +123,7 @@ def __init__( self.py_version = py_version super(MXNetModel, self).__init__( - model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs + model_data, image_uri, role, entry_point, predictor_cls=predictor_cls, **kwargs ) self.model_server_workers = model_server_workers @@ -143,7 +143,7 @@ def prepare_container_def(self, instance_type=None, accelerator_type=None): dict[str, str]: A container definition object usable with the CreateModel API. """ - deploy_image = self.image + deploy_image = self.image_uri if not deploy_image: if instance_type is None: raise ValueError( diff --git a/src/sagemaker/pytorch/estimator.py b/src/sagemaker/pytorch/estimator.py index 6327916cdc..022ee5756a 100644 --- a/src/sagemaker/pytorch/estimator.py +++ b/src/sagemaker/pytorch/estimator.py @@ -167,8 +167,8 @@ def create_model( sagemaker.pytorch.model.PyTorchModel: A SageMaker ``PyTorchModel`` object. See :func:`~sagemaker.pytorch.model.PyTorchModel` for full details. 
""" - if "image" not in kwargs: - kwargs["image"] = self.image_uri + if "image_uri" not in kwargs: + kwargs["image_uri"] = self.image_uri kwargs["name"] = self._get_or_create_name(kwargs.get("name")) @@ -206,7 +206,7 @@ class constructor init_params = super(PyTorch, cls)._prepare_init_params_from_job_description( job_details, model_channel_name ) - image_uri = init_params.pop("image") + image_uri = init_params.pop("image_uri") framework, py_version, tag, _ = framework_name_from_image(image_uri) if tag is None: diff --git a/src/sagemaker/pytorch/model.py b/src/sagemaker/pytorch/model.py index 8f94a06a66..60fec71100 100644 --- a/src/sagemaker/pytorch/model.py +++ b/src/sagemaker/pytorch/model.py @@ -68,7 +68,7 @@ def __init__( entry_point, framework_version=None, py_version=None, - image=None, + image_uri=None, predictor_cls=PyTorchPredictor, model_server_workers=None, **kwargs @@ -89,13 +89,13 @@ def __init__( must point to a file located at the root of ``source_dir``. framework_version (str): PyTorch version you want to use for executing your model training code. Defaults to None. Required - unless ``image`` is provided. + unless ``image_uri`` is provided. py_version (str): Python version you want to use for executing your model training code. Defaults to ``None``. Required unless - ``image`` is provided. - image (str): A Docker image URI (default: None). If not specified, a + ``image_uri`` is provided. + image_uri (str): A Docker image URI (default: None). If not specified, a default image for PyTorch will be used. If ``framework_version`` - or ``py_version`` are ``None``, then ``image`` is required. If + or ``py_version`` are ``None``, then ``image_uri`` is required. If also ``None``, then a ``ValueError`` will be raised. predictor_cls (callable[str, sagemaker.session.Session]): A function to call to create a predictor with an endpoint name and @@ -113,7 +113,7 @@ def __init__( :class:`~sagemaker.model.FrameworkModel` and :class:`~sagemaker.model.Model`. 
""" - validate_version_or_image_args(framework_version, py_version, image) + validate_version_or_image_args(framework_version, py_version, image_uri) if py_version == "py2": logger.warning( python_deprecation_warning(self.__framework_name__, defaults.LATEST_PY2_VERSION) @@ -122,7 +122,7 @@ def __init__( self.py_version = py_version super(PyTorchModel, self).__init__( - model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs + model_data, image_uri, role, entry_point, predictor_cls=predictor_cls, **kwargs ) self.model_server_workers = model_server_workers @@ -142,7 +142,7 @@ def prepare_container_def(self, instance_type=None, accelerator_type=None): dict[str, str]: A container definition object usable with the CreateModel API. """ - deploy_image = self.image + deploy_image = self.image_uri if not deploy_image: if instance_type is None: raise ValueError( diff --git a/src/sagemaker/rl/estimator.py b/src/sagemaker/rl/estimator.py index d2e7cc6f7c..f02e377a46 100644 --- a/src/sagemaker/rl/estimator.py +++ b/src/sagemaker/rl/estimator.py @@ -221,7 +221,7 @@ def create_model( base_args = dict( model_data=self.model_data, role=role or self.role, - image=kwargs.get("image", self.image_uri), + image_uri=kwargs.get("image_uri", self.image_uri), container_log_level=self.container_log_level, sagemaker_session=self.sagemaker_session, vpc_config=self.get_vpc_config(vpc_config_override), @@ -303,7 +303,7 @@ class constructor job_details, model_channel_name ) - image_uri = init_params.pop("image") + image_uri = init_params.pop("image_uri") framework, _, tag, _ = fw_utils.framework_name_from_image(image_uri) if not framework: diff --git a/src/sagemaker/session.py b/src/sagemaker/session.py index 3a58ac974a..066007f251 100644 --- a/src/sagemaker/session.py +++ b/src/sagemaker/session.py @@ -429,7 +429,7 @@ def train( # noqa: C901 tags, metric_definitions, enable_network_isolation=False, - image=None, + image_uri=None, algorithm_arn=None, 
encrypt_inter_container_traffic=False, train_use_spot_instances=False, @@ -486,7 +486,7 @@ def train( # noqa: C901 metric from the logs. enable_network_isolation (bool): Whether to request for the training job to run with network isolation or not. - image (str): Docker image containing training code. + image_uri (str): Docker image containing training code. algorithm_arn (str): Algorithm Arn from Marketplace. encrypt_inter_container_traffic (bool): Specifies whether traffic between training containers is encrypted for the training job (default: ``False``). @@ -523,17 +523,17 @@ def train( # noqa: C901 "RoleArn": role, } - if image and algorithm_arn: + if image_uri and algorithm_arn: raise ValueError( - "image and algorithm_arn are mutually exclusive." - "Both were provided: image: %s algorithm_arn: %s" % (image, algorithm_arn) + "image_uri and algorithm_arn are mutually exclusive." + "Both were provided: image_uri: %s algorithm_arn: %s" % (image_uri, algorithm_arn) ) - if image is None and algorithm_arn is None: - raise ValueError("either image or algorithm_arn is required. None was provided.") + if image_uri is None and algorithm_arn is None: + raise ValueError("either image_uri or algorithm_arn is required. None was provided.") - if image is not None: - train_request["AlgorithmSpecification"]["TrainingImage"] = image + if image_uri is not None: + train_request["AlgorithmSpecification"]["TrainingImage"] = image_uri if algorithm_arn is not None: train_request["AlgorithmSpecification"]["AlgorithmName"] = algorithm_arn @@ -1535,7 +1535,7 @@ def tune( # noqa: C901 tags, warm_start_config, enable_network_isolation=False, - image=None, + image_uri=None, algorithm_arn=None, early_stopping_type="Off", encrypt_inter_container_traffic=False, @@ -1561,7 +1561,7 @@ def tune( # noqa: C901 hyperparameters remain unchanged across all of the training jobs for the hyperparameter tuning job. The hyperparameters are made accessible as a dictionary for the training code on SageMaker. 
- image (str): Docker image containing training code. + image_uri (str): Docker image URI containing training code. algorithm_arn (str): Resource ARN for training algorithm created on or subscribed from AWS Marketplace (default: None). input_mode (str): The input mode that the algorithm supports. Valid modes: @@ -1639,7 +1639,7 @@ def tune( # noqa: C901 static_hyperparameters=static_hyperparameters, role=role, input_mode=input_mode, - image=image, + image_uri=image_uri, algorithm_arn=algorithm_arn, metric_definitions=metric_definitions, input_config=input_config, @@ -1829,7 +1829,7 @@ def _map_training_config( stop_condition, input_config=None, metric_definitions=None, - image=None, + image_uri=None, algorithm_arn=None, vpc_config=None, enable_network_isolation=False, @@ -1879,7 +1879,7 @@ def _map_training_config( the name of the metric, and 'Regex' for the regular expression used to extract the metric from the logs. This should be defined only for jobs that don't use an Amazon algorithm. - image (str): Docker image containing training code. + image_uri (str): Docker image URI containing training code. algorithm_arn (str): Resource ARN for training algorithm created or subscribed on AWS Marketplace vpc_config (dict): Contains values for VpcConfig (default: None): @@ -1922,7 +1922,7 @@ def _map_training_config( if algorithm_arn: algorithm_spec["AlgorithmName"] = algorithm_arn else: - algorithm_spec["TrainingImage"] = image + algorithm_spec["TrainingImage"] = image_uri training_job_definition["AlgorithmSpecification"] = algorithm_spec @@ -2156,7 +2156,7 @@ def create_model_from_job( training_job_name, name=None, role=None, - primary_container_image=None, + primary_container_image_uri=None, model_data_url=None, env=None, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT, @@ -2171,8 +2171,8 @@ def create_model_from_job( role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, specified either by an IAM role name or role ARN. 
If None, the ``RoleArn`` from the SageMaker Training Job will be used. - primary_container_image (str): The Docker image reference (default: None). If None, it - defaults to the Training Image in ``training_job_name``. + primary_container_image_uri (str): The Docker image URI (default: None). If None, it + defaults to the training image URI from ``training_job_name``. model_data_url (str): S3 location of the model data (default: None). If None, defaults to the ``ModelS3Artifacts`` of ``training_job_name``. env (dict[string,string]): Model environment variables (default: {}). @@ -2194,7 +2194,7 @@ def create_model_from_job( role = role or training_job["RoleArn"] env = env or {} primary_container = container_def( - primary_container_image or training_job["AlgorithmSpecification"]["TrainingImage"], + primary_container_image_uri or training_job["AlgorithmSpecification"]["TrainingImage"], model_data_url=model_data_url or training_job["ModelArtifacts"]["S3ModelArtifacts"], env=env, ) @@ -2700,7 +2700,7 @@ def endpoint_from_job( job_name, initial_instance_count, instance_type, - deployment_image=None, + deployment_image_uri=None, name=None, role=None, wait=True, @@ -2725,7 +2725,7 @@ def endpoint_from_job( autoscaling. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, for example, 'ml.c4.xlarge'. - deployment_image (str): The Docker image which defines the inference code to be used + deployment_image_uri (str): The Docker image which defines the inference code to be used as the entry point for accepting prediction requests. If not specified, uses the image used for the training job. name (str): Name of the ``Endpoint`` to create. 
If not specified, uses the training job @@ -2755,14 +2755,16 @@ def endpoint_from_job( """ job_desc = self.sagemaker_client.describe_training_job(TrainingJobName=job_name) output_url = job_desc["ModelArtifacts"]["S3ModelArtifacts"] - deployment_image = deployment_image or job_desc["AlgorithmSpecification"]["TrainingImage"] + deployment_image_uri = ( + deployment_image_uri or job_desc["AlgorithmSpecification"]["TrainingImage"] + ) role = role or job_desc["RoleArn"] name = name or job_name vpc_config_override = _vpc_config_from_training_job(job_desc, vpc_config_override) return self.endpoint_from_model_data( model_s3_location=output_url, - deployment_image=deployment_image, + deployment_image_uri=deployment_image_uri, initial_instance_count=initial_instance_count, instance_type=instance_type, name=name, @@ -2777,7 +2779,7 @@ def endpoint_from_job( def endpoint_from_model_data( self, model_s3_location, - deployment_image, + deployment_image_uri, initial_instance_count, instance_type, name=None, @@ -2792,8 +2794,8 @@ def endpoint_from_model_data( Args: model_s3_location (str): S3 URI of the model artifacts to use for the endpoint. - deployment_image (str): The Docker image which defines the runtime code to be used as - the entry point for accepting prediction requests. + deployment_image_uri (str): The Docker image URI which defines the runtime code to be + used as the entry point for accepting prediction requests. initial_instance_count (int): Minimum number of EC2 instances to launch. The actual number of active instances for an endpoint at any given time varies due to autoscaling. 
@@ -2824,7 +2826,7 @@ def endpoint_from_model_data( """ model_environment_vars = model_environment_vars or {} - name = name or name_from_image(deployment_image) + name = name or name_from_image(deployment_image_uri) model_vpc_config = vpc_utils.sanitize(model_vpc_config) if _deployment_entity_exists( @@ -2838,7 +2840,9 @@ def endpoint_from_model_data( lambda: self.sagemaker_client.describe_model(ModelName=name) ): primary_container = container_def( - image=deployment_image, model_data_url=model_s3_location, env=model_environment_vars + image_uri=deployment_image_uri, + model_data_url=model_s3_location, + env=model_environment_vars, ) self.create_model( name=name, role=role, container_defs=primary_container, vpc_config=model_vpc_config @@ -3249,11 +3253,11 @@ def logs_for_transform_job(self, job_name, wait=False, poll=10): print() -def container_def(image, model_data_url=None, env=None, container_mode=None): +def container_def(image_uri, model_data_url=None, env=None, container_mode=None): """Create a definition for executing a container as part of a SageMaker model. Args: - image (str): Docker image to run for this container. + image_uri (str): Docker image URI to run for this container. model_data_url (str): S3 URI of data required by this container, e.g. SageMaker training job model artifacts (default: None). env (dict[str, str]): Environment variables to set inside the container (default: None). 
@@ -3267,7 +3271,7 @@ def container_def(image, model_data_url=None, env=None, container_mode=None): """ if env is None: env = {} - c_def = {"Image": image, "Environment": env} + c_def = {"Image": image_uri, "Environment": env} if model_data_url: c_def["ModelDataUrl"] = model_data_url if container_mode: @@ -3377,21 +3381,21 @@ class ModelContainer(object): Attributes: model_data (str): S3 Model artifact location - image (str): Docker image URL in ECR + image_uri (str): Docker image URL in ECR env (dict[str,str]): Environment variable mapping """ - def __init__(self, model_data, image, env=None): + def __init__(self, model_data, image_uri, env=None): """Create a definition of a model which can be part of an Inference Pipeline Args: model_data (str): The S3 location of a SageMaker model data ``.tar.gz`` file. - image (str): A Docker image URI. - env (dict[str, str]): Environment variables to run with ``image`` when hosted in + image_uri (str): A Docker image URI. + env (dict[str, str]): Environment variables to run with ``image_uri`` when hosted in SageMaker (default: None). 
""" self.model_data = model_data - self.image = image + self.image_uri = image_uri self.env = env diff --git a/src/sagemaker/sklearn/estimator.py b/src/sagemaker/sklearn/estimator.py index 0cf9be61dd..1ce68014a6 100644 --- a/src/sagemaker/sklearn/estimator.py +++ b/src/sagemaker/sklearn/estimator.py @@ -188,8 +188,8 @@ def create_model( role = role or self.role kwargs["name"] = self._get_or_create_name(kwargs.get("name")) - if "image" not in kwargs: - kwargs["image"] = self.image_uri + if "image_uri" not in kwargs: + kwargs["image_uri"] = self.image_uri if "enable_network_isolation" not in kwargs: kwargs["enable_network_isolation"] = self.enable_network_isolation() @@ -228,7 +228,7 @@ class constructor init_params = super(SKLearn, cls)._prepare_init_params_from_job_description( job_details, model_channel_name ) - image_uri = init_params.pop("image") + image_uri = init_params.pop("image_uri") framework, py_version, tag, _ = framework_name_from_image(image_uri) if tag is None: diff --git a/src/sagemaker/sklearn/model.py b/src/sagemaker/sklearn/model.py index 9f7e0492fb..7e32fc2b98 100644 --- a/src/sagemaker/sklearn/model.py +++ b/src/sagemaker/sklearn/model.py @@ -62,7 +62,7 @@ def __init__( entry_point, framework_version=None, py_version="py3", - image=None, + image_uri=None, predictor_cls=SKLearnPredictor, model_server_workers=None, **kwargs @@ -83,16 +83,16 @@ def __init__( must point to a file located at the root of ``source_dir``. framework_version (str): Scikit-learn version you want to use for executing your model training code. Defaults to ``None``. Required - unless ``image`` is provided. + unless ``image_uri`` is provided. py_version (str): Python version you want to use for executing your model training code (default: 'py3'). Currently, 'py3' is the only - supported version. If ``None`` is passed in, ``image`` must be + supported version. If ``None`` is passed in, ``image_uri`` must be provided. - image (str): A Docker image URI (default: None). 
If not specified, a + image_uri (str): A Docker image URI (default: None). If not specified, a default image for Scikit-learn will be used. If ``framework_version`` or ``py_version`` are ``None``, then - ``image`` is required. If also ``None``, then a ``ValueError`` + ``image_uri`` is required. If also ``None``, then a ``ValueError`` will be raised. predictor_cls (callable[str, sagemaker.session.Session]): A function to call to create a predictor with an endpoint name and @@ -110,7 +110,7 @@ def __init__( :class:`~sagemaker.model.FrameworkModel` and :class:`~sagemaker.model.Model`. """ - validate_version_or_image_args(framework_version, py_version, image) + validate_version_or_image_args(framework_version, py_version, image_uri) if py_version and py_version != "py3": raise AttributeError( "Scikit-learn image only supports Python 3. Please use 'py3' for py_version." @@ -119,7 +119,7 @@ def __init__( self.py_version = py_version super(SKLearnModel, self).__init__( - model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs + model_data, image_uri, role, entry_point, predictor_cls=predictor_cls, **kwargs ) self.model_server_workers = model_server_workers @@ -143,7 +143,7 @@ def prepare_container_def(self, instance_type=None, accelerator_type=None): if accelerator_type: raise ValueError("Accelerator types are not supported for Scikit-Learn.") - deploy_image = self.image + deploy_image = self.image_uri if not deploy_image: deploy_image = self.serving_image_uri( self.sagemaker_session.boto_region_name, instance_type diff --git a/src/sagemaker/sparkml/model.py b/src/sagemaker/sparkml/model.py index bbb1d0a831..dbcfa535c6 100644 --- a/src/sagemaker/sparkml/model.py +++ b/src/sagemaker/sparkml/model.py @@ -94,9 +94,11 @@ def __init__(self, model_data, role=None, spark_version=2.2, sagemaker_session=N # For local mode, sagemaker_session should be passed as None but we need a session to get # boto_region_name region_name = (sagemaker_session or 
Session()).boto_region_name - image = "{}/{}:{}".format(registry(region_name, framework_name), repo_name, spark_version) + image_uri = "{}/{}:{}".format( + registry(region_name, framework_name), repo_name, spark_version + ) super(SparkMLModel, self).__init__( - image, + image_uri, model_data, role, predictor_cls=SparkMLPredictor, diff --git a/src/sagemaker/tensorflow/estimator.py b/src/sagemaker/tensorflow/estimator.py index 49eba2100c..a642ef0cb6 100644 --- a/src/sagemaker/tensorflow/estimator.py +++ b/src/sagemaker/tensorflow/estimator.py @@ -192,8 +192,9 @@ def _prepare_init_params_from_job_description(cls, job_details, model_channel_na job_details, model_channel_name ) - image_uri = init_params.pop("image") + image_uri = init_params.pop("image_uri") framework, py_version, tag, script_mode = fw.framework_name_from_image(image_uri) + if not framework: # If we were unable to parse the framework name from the image, it is not one of our # officially supported images, so just add the image to the init params. @@ -270,8 +271,8 @@ def create_model( """ kwargs["name"] = self._get_or_create_name(kwargs.get("name")) - if "image" not in kwargs: - kwargs["image"] = self.image_uri + if "image_uri" not in kwargs: + kwargs["image_uri"] = self.image_uri if "enable_network_isolation" not in kwargs: kwargs["enable_network_isolation"] = self.enable_network_isolation() diff --git a/src/sagemaker/tensorflow/model.py b/src/sagemaker/tensorflow/model.py index 2214f73c9b..fe8e6cfafd 100644 --- a/src/sagemaker/tensorflow/model.py +++ b/src/sagemaker/tensorflow/model.py @@ -136,7 +136,7 @@ def __init__( model_data, role, entry_point=None, - image=None, + image_uri=None, framework_version=None, container_log_level=None, predictor_cls=TensorFlowPredictor, @@ -156,12 +156,12 @@ def __init__( file which should be executed as the entry point to model hosting. If ``source_dir`` is specified, then ``entry_point`` must point to a file located at the root of ``source_dir``. 
- image (str): A Docker image URI (default: None). If not specified, a + image_uri (str): A Docker image URI (default: None). If not specified, a default image for TensorFlow Serving will be used. If - ``framework_version`` is ``None``, then ``image`` is required. + ``framework_version`` is ``None``, then ``image_uri`` is required. If also ``None``, then a ``ValueError`` will be raised. framework_version (str): Optional. TensorFlow Serving version you - want to use. Defaults to ``None``. Required unless ``image`` is + want to use. Defaults to ``None``. Required unless ``image_uri`` is provided. container_log_level (int): Log level to use within the container (default: logging.ERROR). Valid values are defined in the Python @@ -178,17 +178,17 @@ def __init__( :class:`~sagemaker.model.FrameworkModel` and :class:`~sagemaker.model.Model`. """ - if framework_version is None and image is None: + if framework_version is None and image_uri is None: raise ValueError( - "Both framework_version and image were None. " - "Either specify framework_version or specify image." + "Both framework_version and image_uri were None. " + "Either specify framework_version or specify image_uri." ) self.framework_version = framework_version super(TensorFlowModel, self).__init__( model_data=model_data, role=role, - image=image, + image_uri=image_uri, predictor_cls=predictor_cls, entry_point=entry_point, **kwargs @@ -232,16 +232,18 @@ def prepare_container_def(self, instance_type=None, accelerator_type=None): instance_type: accelerator_type: """ - if self.image is None and instance_type is None: + if self.image_uri is None and instance_type is None: raise ValueError( "Must supply either an instance type (for choosing CPU vs GPU) or an image URI." 
) - image = self._get_image_uri(instance_type, accelerator_type) + image_uri = self._get_image_uri(instance_type, accelerator_type) env = self._get_container_env() if self.entry_point: - key_prefix = sagemaker.fw_utils.model_code_key_prefix(self.key_prefix, self.name, image) + key_prefix = sagemaker.fw_utils.model_code_key_prefix( + self.key_prefix, self.name, image_uri + ) bucket = self.bucket or self.sagemaker_session.default_bucket() model_data = "s3://{}/{}/model.tar.gz".format(bucket, key_prefix) @@ -258,7 +260,7 @@ def prepare_container_def(self, instance_type=None, accelerator_type=None): else: model_data = self.model_data - return sagemaker.container_def(image, model_data, env) + return sagemaker.container_def(image_uri, model_data, env) def _get_container_env(self): """Placeholder docstring""" @@ -279,8 +281,8 @@ def _get_image_uri(self, instance_type, accelerator_type=None): instance_type: accelerator_type: """ - if self.image: - return self.image + if self.image_uri: + return self.image_uri region_name = self.sagemaker_session.boto_region_name return create_image_uri( diff --git a/src/sagemaker/tuner.py b/src/sagemaker/tuner.py index f91b853ed5..38ac60440a 100644 --- a/src/sagemaker/tuner.py +++ b/src/sagemaker/tuner.py @@ -1511,7 +1511,7 @@ def _prepare_training_config( if isinstance(estimator, sagemaker.algorithm.AlgorithmEstimator): training_config["algorithm_arn"] = estimator.algorithm_arn else: - training_config["image"] = estimator.train_image() + training_config["image_uri"] = estimator.train_image() training_config["enable_network_isolation"] = estimator.enable_network_isolation() training_config[ diff --git a/src/sagemaker/workflow/airflow.py b/src/sagemaker/workflow/airflow.py index 2730f7d272..fb4ae5f7f6 100644 --- a/src/sagemaker/workflow/airflow.py +++ b/src/sagemaker/workflow/airflow.py @@ -522,7 +522,7 @@ def prepare_framework_container_def(model, instance_type, s3_operations): Returns: dict: The container information of this framework 
model. """ - deploy_image = model.image + deploy_image = model.image_uri if not deploy_image: region_name = model.sagemaker_session.boto_session.region_name deploy_image = model.serving_image_uri(region_name, instance_type) @@ -560,7 +560,7 @@ def prepare_framework_container_def(model, instance_type, s3_operations): return sagemaker.container_def(deploy_image, model.model_data, deploy_env) -def model_config(model, instance_type=None, role=None, image=None): +def model_config(model, instance_type=None, role=None, image_uri=None): """Export Airflow model config from a SageMaker model Args: @@ -568,7 +568,7 @@ def model_config(model, instance_type=None, role=None, image=None): instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge' role (str): The ``ExecutionRoleArn`` IAM Role ARN for the model - image (str): An container image to use for deploying the model + image_uri (str): An Docker image URI to use for deploying the model Returns: dict: Model config that can be directly used by SageMakerModelOperator @@ -576,7 +576,7 @@ def model_config(model, instance_type=None, role=None, image=None): SageMakerEndpointOperator and SageMakerTransformOperator in Airflow. """ s3_operations = {} - model.image = image or model.image + model.image_uri = image_uri or model.image_uri if isinstance(model, sagemaker.model.FrameworkModel): container_def = prepare_framework_container_def(model, instance_type, s3_operations) @@ -608,7 +608,7 @@ def model_config_from_estimator( task_type, instance_type=None, role=None, - image=None, + image_uri=None, name=None, model_server_workers=None, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT, @@ -630,7 +630,7 @@ def model_config_from_estimator( instance_type (str): The EC2 instance type to deploy this Model to. 
For example, 'ml.p2.xlarge' role (str): The ``ExecutionRoleArn`` IAM Role ARN for the model - image (str): An container image to use for deploying the model + image_uri (str): A Docker image URI to use for deploying the model name (str): Name of the model model_server_workers (int): The number of worker processes used by the inference server. If None, server will use one worker per vCPU. Only @@ -647,7 +647,7 @@ def model_config_from_estimator( update_estimator_from_task(estimator, task_id, task_type) if isinstance(estimator, sagemaker.estimator.Estimator): model = estimator.create_model( - role=role, image=image, vpc_config_override=vpc_config_override + role=role, image_uri=image_uri, vpc_config_override=vpc_config_override ) elif isinstance(estimator, sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): model = estimator.create_model(vpc_config_override=vpc_config_override) @@ -669,7 +669,7 @@ def model_config_from_estimator( ) model.name = name - return model_config(model, instance_type, role, image) + return model_config(model, instance_type, role, image_uri) def transform_config( @@ -809,7 +809,7 @@ def transform_config_from_estimator( role=None, volume_kms_key=None, model_server_workers=None, - image=None, + image_uri=None, vpc_config_override=None, input_filter=None, output_filter=None, @@ -879,7 +879,7 @@ def transform_config_from_estimator( model_server_workers (int): Optional. The number of worker processes used by the inference server. If None, server will use one worker per vCPU. - image (str): An container image to use for deploying the model + image_uri (str): A Docker image URI to use for deploying the model vpc_config_override (dict[str, list[str]]): Override for VpcConfig set on the model. Default: use subnets and security groups from this Estimator. 
@@ -921,7 +921,7 @@ def transform_config_from_estimator( task_type=task_type, instance_type=instance_type, role=role, - image=image, + image_uri=image_uri, name=model_name, model_server_workers=model_server_workers, vpc_config_override=vpc_config_override, diff --git a/src/sagemaker/xgboost/estimator.py b/src/sagemaker/xgboost/estimator.py index af0f5f2d08..a130898f59 100644 --- a/src/sagemaker/xgboost/estimator.py +++ b/src/sagemaker/xgboost/estimator.py @@ -166,8 +166,8 @@ def create_model( role = role or self.role kwargs["name"] = self._get_or_create_name(kwargs.get("name")) - if "image" not in kwargs: - kwargs["image"] = self.image_uri + if "image_uri" not in kwargs: + kwargs["image_uri"] = self.image_uri return XGBoostModel( self.model_data, @@ -261,7 +261,7 @@ def _prepare_init_params_from_job_description(cls, job_details, model_channel_na """ init_params = super(XGBoost, cls)._prepare_init_params_from_job_description(job_details) - image_uri = init_params.pop("image") + image_uri = init_params.pop("image_uri") framework, py_version, tag, _ = framework_name_from_image(image_uri) init_params["py_version"] = py_version diff --git a/src/sagemaker/xgboost/model.py b/src/sagemaker/xgboost/model.py index 3bc54a5ca3..fd17abeec4 100644 --- a/src/sagemaker/xgboost/model.py +++ b/src/sagemaker/xgboost/model.py @@ -57,7 +57,7 @@ def __init__( role, entry_point, framework_version, - image=None, + image_uri=None, py_version="py3", predictor_cls=XGBoostPredictor, model_server_workers=None, @@ -74,8 +74,8 @@ def __init__( entry_point (str): Path (absolute or relative) to the Python source file which should be executed as the entry point to model hosting. If ``source_dir`` is specified, then ``entry_point`` must point to a file located at the root of ``source_dir``. - image (str): A Docker image URI (default: None). If not specified, a default image for - XGBoos will be used. + image_uri (str): A Docker image URI (default: None). 
If not specified, a default image + for XGBoost will be used. py_version (str): Python version you want to use for executing your model training code (default: 'py3'). framework_version (str): XGBoost version you want to use for executing your model @@ -95,7 +95,7 @@ def __init__( :class:`~sagemaker.model.Model`. """ super(XGBoostModel, self).__init__( - model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs + model_data, image_uri, role, entry_point, predictor_cls=predictor_cls, **kwargs ) if py_version == "py2": @@ -119,7 +119,7 @@ def prepare_container_def(self, instance_type=None, accelerator_type=None): Returns: dict[str, str]: A container definition object usable with the CreateModel API. """ - deploy_image = self.image + deploy_image = self.image_uri if not deploy_image: deploy_image = self.serving_image_uri( self.sagemaker_session.boto_region_name, instance_type diff --git a/tests/integ/test_inference_pipeline.py b/tests/integ/test_inference_pipeline.py index 71bfd0258f..e9e69642aa 100644 --- a/tests/integ/test_inference_pipeline.py +++ b/tests/integ/test_inference_pipeline.py @@ -69,7 +69,7 @@ def test_inference_pipeline_batch_transform(sagemaker_session, cpu_instance_type ) xgb_image = get_image_uri(sagemaker_session.boto_region_name, "xgboost") xgb_model = Model( - model_data=xgb_model_data, image=xgb_image, sagemaker_session=sagemaker_session + model_data=xgb_model_data, image_uri=xgb_image, sagemaker_session=sagemaker_session ) model = PipelineModel( models=[sparkml_model, xgb_model], @@ -119,7 +119,7 @@ def test_inference_pipeline_model_deploy(sagemaker_session, cpu_instance_type): ) xgb_image = get_image_uri(sagemaker_session.boto_region_name, "xgboost") xgb_model = Model( - model_data=xgb_model_data, image=xgb_image, sagemaker_session=sagemaker_session + model_data=xgb_model_data, image_uri=xgb_image, sagemaker_session=sagemaker_session ) model = PipelineModel( models=[sparkml_model, xgb_model], @@ -173,7 +173,7 @@ def 
test_inference_pipeline_model_deploy_and_update_endpoint( ) xgb_image = get_image_uri(sagemaker_session.boto_region_name, "xgboost") xgb_model = Model( - model_data=xgb_model_data, image=xgb_image, sagemaker_session=sagemaker_session + model_data=xgb_model_data, image_uri=xgb_image, sagemaker_session=sagemaker_session ) model = PipelineModel( models=[sparkml_model, xgb_model], diff --git a/tests/integ/test_multidatamodel.py b/tests/integ/test_multidatamodel.py index 0864c45b20..450762caa8 100644 --- a/tests/integ/test_multidatamodel.py +++ b/tests/integ/test_multidatamodel.py @@ -136,7 +136,7 @@ def test_multi_data_model_deploy_pretrained_models( multi_data_model = MultiDataModel( name=model_name, model_data_prefix=model_data_prefix, - image=container_image, + image_uri=container_image, role=ROLE, sagemaker_session=sagemaker_session, ) @@ -194,7 +194,7 @@ def test_multi_data_model_deploy_pretrained_models_local_mode(container_image, s multi_data_model = MultiDataModel( name=model_name, model_data_prefix=model_data_prefix, - image=container_image, + image_uri=container_image, role=ROLE, sagemaker_session=sagemaker_session, ) @@ -442,7 +442,7 @@ def __rcf_training_job( # Replace the container image value with a multi-model container image for now since the # frameworks do not support multi-model container image yet. 
rcf_model = rcf.create_model() - rcf_model.image = container_image + rcf_model.image_uri = container_image return rcf_model @@ -463,7 +463,7 @@ def test_multi_data_model_deploy_pretrained_models_update_endpoint( multi_data_model = MultiDataModel( name=model_name, model_data_prefix=model_data_prefix, - image=container_image, + image_uri=container_image, role=ROLE, sagemaker_session=sagemaker_session, ) diff --git a/tests/unit/sagemaker/model/test_neo.py b/tests/unit/sagemaker/model/test_neo.py index a7906ef9cf..4ed03720f8 100644 --- a/tests/unit/sagemaker/model/test_neo.py +++ b/tests/unit/sagemaker/model/test_neo.py @@ -58,7 +58,7 @@ def test_compile_model_for_inferentia(sagemaker_session): "{}.dkr.ecr.{}.amazonaws.com/sagemaker-neo-tensorflow:1.15.0-inf-py3".format( NEO_REGION_ACCOUNT, REGION ) - == model.image + == model.image_uri ) assert model._is_compiled_model is True diff --git a/tests/unit/sagemaker/tensorflow/test_estimator.py b/tests/unit/sagemaker/tensorflow/test_estimator.py index fd2ad7bf87..f967a0bf4b 100644 --- a/tests/unit/sagemaker/tensorflow/test_estimator.py +++ b/tests/unit/sagemaker/tensorflow/test_estimator.py @@ -104,7 +104,7 @@ def _hyperparameters(horovod=False): def _create_train_job(tf_version, horovod=False, ps=False, py_version="py2"): conf = { - "image": _image_uri(tf_version, py_version), + "image_uri": _image_uri(tf_version, py_version), "input_mode": "File", "input_config": [ { @@ -272,7 +272,7 @@ def test_create_model_with_custom_image(sagemaker_session): tf.fit(inputs="s3://mybucket/train", job_name=job_name) model = tf.create_model() - assert model.image == custom_image + assert model.image_uri == custom_image @patch("sagemaker.tensorflow.estimator.TensorFlow.create_model") diff --git a/tests/unit/sagemaker/tensorflow/test_tfs.py b/tests/unit/sagemaker/tensorflow/test_tfs.py index 57d0da56aa..2fd6b75078 100644 --- a/tests/unit/sagemaker/tensorflow/test_tfs.py +++ b/tests/unit/sagemaker/tensorflow/test_tfs.py @@ -159,7 +159,7 @@ 
def test_tfs_model_with_custom_image(sagemaker_session, tf_version): "s3://some/data.tar.gz", role=ROLE, framework_version=tf_version, - image="my-image", + image_uri="my-image", sagemaker_session=sagemaker_session, ) cdef = model.prepare_container_def(INSTANCE_TYPE) @@ -176,14 +176,14 @@ def test_tfs_model_with_entry_point( entry_point="train.py", role=ROLE, framework_version=tf_version, - image="my-image", + image_uri="my-image", sagemaker_session=sagemaker_session, model_kms_key="kms-key", ) model.prepare_container_def(INSTANCE_TYPE) - model_code_key_prefix.assert_called_with(model.key_prefix, model.name, model.image) + model_code_key_prefix.assert_called_with(model.key_prefix, model.name, model.image_uri) repack_model.assert_called_with( "train.py", @@ -205,13 +205,13 @@ def test_tfs_model_with_source(repack_model, model_code_key_prefix, sagemaker_se source_dir="src", role=ROLE, framework_version=tf_version, - image="my-image", + image_uri="my-image", sagemaker_session=sagemaker_session, ) model.prepare_container_def(INSTANCE_TYPE) - model_code_key_prefix.assert_called_with(model.key_prefix, model.name, model.image) + model_code_key_prefix.assert_called_with(model.key_prefix, model.name, model.image_uri) repack_model.assert_called_with( "train.py", @@ -235,13 +235,13 @@ def test_tfs_model_with_dependencies( dependencies=["src", "lib"], role=ROLE, framework_version=tf_version, - image="my-image", + image_uri="my-image", sagemaker_session=sagemaker_session, ) model.prepare_container_def(INSTANCE_TYPE) - model_code_key_prefix.assert_called_with(model.key_prefix, model.name, model.image) + model_code_key_prefix.assert_called_with(model.key_prefix, model.name, model.image_uri) repack_model.assert_called_with( "train.py", diff --git a/tests/unit/test_airflow.py b/tests/unit/test_airflow.py index 658368a3a0..a6cc96e7f9 100644 --- a/tests/unit/test_airflow.py +++ b/tests/unit/test_airflow.py @@ -896,7 +896,7 @@ def test_merge_s3_operations(): def 
test_byo_model_config(sagemaker_session): byo_model = model.Model( model_data="{{ model_data }}", - image="{{ image }}", + image_uri="{{ image_uri }}", role="{{ role }}", env={"{{ key }}": "{{ value }}"}, name="model", @@ -907,7 +907,7 @@ def test_byo_model_config(sagemaker_session): expected_config = { "ModelName": "model", "PrimaryContainer": { - "Image": "{{ image }}", + "Image": "{{ image_uri }}", "Environment": {"{{ key }}": "{{ value }}"}, "ModelDataUrl": "{{ model_data }}", }, @@ -920,7 +920,7 @@ def test_byo_model_config(sagemaker_session): def test_byo_framework_model_config(sagemaker_session): byo_model = model.FrameworkModel( model_data="{{ model_data }}", - image="{{ image }}", + image_uri="{{ image_uri }}", role="{{ role }}", entry_point="{{ entry_point }}", source_dir="{{ source_dir }}", @@ -933,7 +933,7 @@ def test_byo_framework_model_config(sagemaker_session): expected_config = { "ModelName": "model", "PrimaryContainer": { - "Image": "{{ image }}", + "Image": "{{ image_uri }}", "Environment": { "{{ key }}": "{{ value }}", "SAGEMAKER_PROGRAM": "{{ entry_point }}", @@ -967,7 +967,7 @@ def test_framework_model_config(sagemaker_session): role="{{ role }}", entry_point="{{ entry_point }}", source_dir="{{ source_dir }}", - image=None, + image_uri=None, py_version="py3", framework_version="5.0.0", model_server_workers="{{ model_server_worker }}", @@ -1346,7 +1346,7 @@ def test_deploy_framework_model_config(sagemaker_session): role="{{ role }}", entry_point="{{ entry_point }}", source_dir="{{ source_dir }}", - image=None, + image_uri=None, py_version="py3", framework_version="5.0.0", model_server_workers="{{ model_server_worker }}", diff --git a/tests/unit/test_chainer.py b/tests/unit/test_chainer.py index ed028404e2..89ef210ef9 100644 --- a/tests/unit/test_chainer.py +++ b/tests/unit/test_chainer.py @@ -117,7 +117,7 @@ def _chainer_estimator( def _create_train_job(version, py_version): return { - "image": _get_full_cpu_image_uri(version, py_version), + 
"image_uri": _get_full_cpu_image_uri(version, py_version), "input_mode": "File", "input_config": [ { @@ -329,7 +329,7 @@ def test_create_model_with_custom_image(sagemaker_session): chainer.fit(inputs="s3://mybucket/train", job_name="new_name") model = chainer.create_model() - assert model.image == custom_image + assert model.image_uri == custom_image @patch("sagemaker.utils.create_tar_file", MagicMock()) diff --git a/tests/unit/test_endpoint_from_job.py b/tests/unit/test_endpoint_from_job.py index 6c795b5080..189d103cbe 100644 --- a/tests/unit/test_endpoint_from_job.py +++ b/tests/unit/test_endpoint_from_job.py @@ -67,7 +67,7 @@ def test_all_defaults_no_existing_entities(sagemaker_session): expected_args = original_args.copy() expected_args.pop("job_name") expected_args["model_s3_location"] = S3_MODEL_ARTIFACTS - expected_args["deployment_image"] = IMAGE + expected_args["deployment_image_uri"] = IMAGE expected_args["role"] = TRAIN_ROLE expected_args["name"] = JOB_NAME expected_args["model_environment_vars"] = None @@ -85,7 +85,7 @@ def test_no_defaults_no_existing_entities(sagemaker_session): "job_name": JOB_NAME, "initial_instance_count": INITIAL_INSTANCE_COUNT, "instance_type": INSTANCE_TYPE, - "deployment_image": DEPLOY_IMAGE, + "deployment_image_uri": DEPLOY_IMAGE, "role": DEPLOY_ROLE, "name": NEW_ENTITY_NAME, "model_environment_vars": ENV_VARS, diff --git a/tests/unit/test_endpoint_from_model_data.py b/tests/unit/test_endpoint_from_model_data.py index 41c52268b7..8e0b1833e7 100644 --- a/tests/unit/test_endpoint_from_model_data.py +++ b/tests/unit/test_endpoint_from_model_data.py @@ -56,7 +56,7 @@ def sagemaker_session(): def test_all_defaults_no_existing_entities(name_from_image_mock, sagemaker_session): returned_name = sagemaker_session.endpoint_from_model_data( model_s3_location=S3_MODEL_ARTIFACTS, - deployment_image=DEPLOY_IMAGE, + deployment_image_uri=DEPLOY_IMAGE, initial_instance_count=INITIAL_INSTANCE_COUNT, instance_type=INSTANCE_TYPE, 
role=DEPLOY_ROLE, @@ -96,7 +96,7 @@ def test_no_defaults_no_existing_entities(name_from_image_mock, sagemaker_sessio returned_name = sagemaker_session.endpoint_from_model_data( model_s3_location=S3_MODEL_ARTIFACTS, - deployment_image=DEPLOY_IMAGE, + deployment_image_uri=DEPLOY_IMAGE, initial_instance_count=INITIAL_INSTANCE_COUNT, instance_type=INSTANCE_TYPE, role=DEPLOY_ROLE, @@ -145,7 +145,7 @@ def test_model_and_endpoint_config_exist(name_from_image_mock, sagemaker_session sagemaker_session.endpoint_from_model_data( model_s3_location=S3_MODEL_ARTIFACTS, - deployment_image=DEPLOY_IMAGE, + deployment_image_uri=DEPLOY_IMAGE, initial_instance_count=INITIAL_INSTANCE_COUNT, instance_type=INSTANCE_TYPE, wait=False, diff --git a/tests/unit/test_estimator.py b/tests/unit/test_estimator.py index 6e814fc6fc..763e2cbada 100644 --- a/tests/unit/test_estimator.py +++ b/tests/unit/test_estimator.py @@ -67,7 +67,7 @@ RETURNED_JOB_DESCRIPTION = { "AlgorithmSpecification": { "TrainingInputMode": "File", - "TrainingImage": "1.dkr.ecr.us-west-2.amazonaws.com/sagemaker-other-py2-cpu:1.0.4", + "TrainingImage": "1.dkr.ecr.us-west-2.amazonaws.com/sagemaker-other:1.0.4", }, "HyperParameters": { "sagemaker_submit_directory": '"s3://some/sourcedir.tar.gz"', @@ -141,7 +141,7 @@ def _prepare_init_params_from_job_description(cls, job_details, model_channel_na init_params = super(DummyFramework, cls)._prepare_init_params_from_job_description( job_details, model_channel_name ) - init_params.pop("image", None) + init_params.pop("image_uri", None) return init_params @@ -222,7 +222,7 @@ def test_framework_all_init_args(sagemaker_session): "input_mode": "inputmode", "tags": [{"foo": "bar"}], "hyperparameters": {}, - "image": "fakeimage", + "image_uri": "fakeimage", "input_config": [ { "ChannelName": "training", @@ -287,7 +287,7 @@ def test_framework_with_spot_and_checkpoints(sagemaker_session): "input_mode": "inputmode", "tags": [{"foo": "bar"}], "hyperparameters": {}, - "image": "fakeimage", + 
"image_uri": "fakeimage", "input_config": [ { "ChannelName": "training", @@ -790,7 +790,7 @@ def test_fit_verify_job_name(strftime, sagemaker_session): _, _, train_kwargs = sagemaker_session.train.mock_calls[0] assert train_kwargs["hyperparameters"]["sagemaker_enable_cloudwatch_metrics"] - assert train_kwargs["image"] == IMAGE_URI + assert train_kwargs["image_uri"] == IMAGE_URI assert train_kwargs["input_mode"] == "File" assert train_kwargs["tags"] == TAGS assert train_kwargs["job_name"] == JOB_NAME @@ -1665,7 +1665,7 @@ def test_unsupported_type_in_dict(): NO_INPUT_TRAIN_CALL = { "hyperparameters": {}, - "image": IMAGE_URI, + "image_uri": IMAGE_URI, "input_config": None, "input_mode": "File", "output_config": {"S3OutputPath": OUTPUT_PATH}, @@ -2276,14 +2276,13 @@ def test_file_output_path_not_supported_outside_local_mode(session_class): def test_prepare_init_params_from_job_description_with_image_training_job(): - init_params = EstimatorBase._prepare_init_params_from_job_description( job_details=RETURNED_JOB_DESCRIPTION ) assert init_params["role"] == "arn:aws:iam::366:role/SageMakerRole" assert init_params["train_instance_count"] == 1 - assert init_params["image"] == "1.dkr.ecr.us-west-2.amazonaws.com/sagemaker-other-py2-cpu:1.0.4" + assert init_params["image_uri"] == "1.dkr.ecr.us-west-2.amazonaws.com/sagemaker-other:1.0.4" def test_prepare_init_params_from_job_description_with_algorithm_training_job(): diff --git a/tests/unit/test_fm.py b/tests/unit/test_fm.py index 5c4ac46900..44aae132e5 100644 --- a/tests/unit/test_fm.py +++ b/tests/unit/test_fm.py @@ -313,7 +313,7 @@ def test_model_image(sagemaker_session): fm.fit(data, MINI_BATCH_SIZE) model = fm.create_model() - assert model.image == registry(REGION, "factorization-machines") + "/factorization-machines:1" + assert model.image_uri == registry(REGION, "factorization-machines") + "/factorization-machines:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_ipinsights.py 
b/tests/unit/test_ipinsights.py index 57bedb9a4c..020057999f 100644 --- a/tests/unit/test_ipinsights.py +++ b/tests/unit/test_ipinsights.py @@ -288,7 +288,7 @@ def test_model_image(sagemaker_session): ipinsights.fit(data, MINI_BATCH_SIZE) model = ipinsights.create_model() - assert model.image == registry(REGION, "ipinsights") + "/ipinsights:1" + assert model.image_uri == registry(REGION, "ipinsights") + "/ipinsights:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_job.py b/tests/unit/test_job.py index 694f2d7e7a..8bc4019bbc 100644 --- a/tests/unit/test_job.py +++ b/tests/unit/test_job.py @@ -95,7 +95,7 @@ def _prepare_init_params_from_job_description(cls, job_details, model_channel_na init_params = super(DummyFramework, cls)._prepare_init_params_from_job_description( job_details, model_channel_name ) - init_params.pop("image", None) + init_params.pop("image_uri", None) return init_params diff --git a/tests/unit/test_kmeans.py b/tests/unit/test_kmeans.py index 555b78b451..d87905868b 100644 --- a/tests/unit/test_kmeans.py +++ b/tests/unit/test_kmeans.py @@ -257,7 +257,7 @@ def test_model_image(sagemaker_session): kmeans.fit(data, MINI_BATCH_SIZE) model = kmeans.create_model() - assert model.image == registry(REGION, "kmeans") + "/kmeans:1" + assert model.image_uri == registry(REGION, "kmeans") + "/kmeans:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_knn.py b/tests/unit/test_knn.py index b839cdefa9..b1eb15c927 100644 --- a/tests/unit/test_knn.py +++ b/tests/unit/test_knn.py @@ -279,7 +279,7 @@ def test_model_image(sagemaker_session): knn.fit(data, MINI_BATCH_SIZE) model = knn.create_model() - assert model.image == registry(REGION, "knn") + "/knn:1" + assert model.image_uri == registry(REGION, "knn") + "/knn:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_lda.py b/tests/unit/test_lda.py index 4bb4d5a594..7f7dab43d3 100644 --- a/tests/unit/test_lda.py +++ b/tests/unit/test_lda.py @@ 
-215,7 +215,7 @@ def test_model_image(sagemaker_session): lda.fit(data, MINI_BATCH_SZIE) model = lda.create_model() - assert model.image == registry(REGION, "lda") + "/lda:1" + assert model.image_uri == registry(REGION, "lda") + "/lda:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_linear_learner.py b/tests/unit/test_linear_learner.py index 3b16e85c02..3f4123e68b 100644 --- a/tests/unit/test_linear_learner.py +++ b/tests/unit/test_linear_learner.py @@ -416,7 +416,7 @@ def test_model_image(sagemaker_session): lr.fit(data) model = lr.create_model() - assert model.image == registry(REGION, "linear-learner") + "/linear-learner:1" + assert model.image_uri == registry(REGION, "linear-learner") + "/linear-learner:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_multidatamodel.py b/tests/unit/test_multidatamodel.py index f5b8133968..f9121764ff 100644 --- a/tests/unit/test_multidatamodel.py +++ b/tests/unit/test_multidatamodel.py @@ -94,7 +94,7 @@ def multi_data_model(sagemaker_session): return MultiDataModel( name=MODEL_NAME, model_data_prefix=VALID_MULTI_MODEL_DATA_PREFIX, - image=IMAGE, + image_uri=IMAGE, role=ROLE, sagemaker_session=sagemaker_session, ) @@ -118,7 +118,7 @@ def test_multi_data_model_create_with_invalid_model_data_prefix(): invalid_model_data_prefix = "https://mybucket/path/" with pytest.raises(ValueError) as ex: MultiDataModel( - name=MODEL_NAME, model_data_prefix=invalid_model_data_prefix, image=IMAGE, role=ROLE + name=MODEL_NAME, model_data_prefix=invalid_model_data_prefix, image_uri=IMAGE, role=ROLE ) err_msg = 'ValueError: Expecting S3 model prefix beginning with "s3://". 
Received: "{}"'.format( invalid_model_data_prefix @@ -131,13 +131,13 @@ def test_multi_data_model_create_with_invalid_arguments(sagemaker_session, mxnet MultiDataModel( name=MODEL_NAME, model_data_prefix=VALID_MULTI_MODEL_DATA_PREFIX, - image=IMAGE, + image_uri=IMAGE, role=ROLE, sagemaker_session=sagemaker_session, model=mxnet_model, ) assert ( - "Parameters image, role or kwargs are not permitted when model parameter is passed." + "Parameters image_uri, role, and kwargs are not permitted when model parameter is passed." in str(ex) ) @@ -146,7 +146,7 @@ def test_multi_data_model_create(sagemaker_session): model = MultiDataModel( name=MODEL_NAME, model_data_prefix=VALID_MULTI_MODEL_DATA_PREFIX, - image=IMAGE, + image_uri=IMAGE, role=ROLE, sagemaker_session=sagemaker_session, ) @@ -155,7 +155,7 @@ def test_multi_data_model_create(sagemaker_session): assert model.name == MODEL_NAME assert model.model_data_prefix == VALID_MULTI_MODEL_DATA_PREFIX assert model.role == ROLE - assert model.image == IMAGE + assert model.image_uri == IMAGE assert model.vpc_config is None @@ -168,7 +168,7 @@ def test_multi_data_model_create_with_model_arg_only(mxnet_model): assert model.model_data_prefix == VALID_MULTI_MODEL_DATA_PREFIX assert model.model == mxnet_model assert hasattr(model, "role") is False - assert hasattr(model, "image") is False + assert hasattr(model, "image_uri") is False @patch("sagemaker.fw_utils.tar_and_upload_dir", MagicMock()) @@ -202,7 +202,7 @@ def test_deploy_multi_data_model(sagemaker_session): model = MultiDataModel( name=MODEL_NAME, model_data_prefix=VALID_MULTI_MODEL_DATA_PREFIX, - image=IMAGE, + image_uri=IMAGE, role=ROLE, sagemaker_session=sagemaker_session, env={"EXTRA_ENV_MOCK": "MockValue"}, diff --git a/tests/unit/test_mxnet.py b/tests/unit/test_mxnet.py index 12f92b0e6c..e4908d4fc4 100644 --- a/tests/unit/test_mxnet.py +++ b/tests/unit/test_mxnet.py @@ -103,7 +103,7 @@ def skip_if_not_mms_version(mxnet_version): def _get_train_args(job_name): return { 
- "image": IMAGE, + "image_uri": IMAGE, "input_mode": "File", "input_config": [ { @@ -215,7 +215,7 @@ def test_create_model(name_from_base, sagemaker_session, mxnet_version, mxnet_py assert model.name == model_name assert model.container_log_level == container_log_level assert model.source_dir == source_dir - assert model.image is None + assert model.image_uri is None assert model.vpc_config is None name_from_base.assert_called_with(base_job_name) @@ -290,7 +290,7 @@ def test_create_model_with_custom_image(name_from_base, sagemaker_session): model = mx.create_model() assert model.sagemaker_session == sagemaker_session - assert model.image == custom_image + assert model.image_uri == custom_image assert model.entry_point == SCRIPT_PATH assert model.role == ROLE assert model.name == model_name @@ -394,7 +394,7 @@ def test_mxnet_neo( actual_compile_model_args = sagemaker_session.method_calls[3][2] assert expected_compile_model_args == actual_compile_model_args - assert compiled_model.image == _neo_inference_image(mxnet_version) + assert compiled_model.image_uri == _neo_inference_image(mxnet_version) predictor = mx.deploy(1, CPU, use_compiled_model=True) assert isinstance(predictor, MXNetPredictor) @@ -750,7 +750,7 @@ def test_create_model_with_custom_hosting_image(sagemaker_session): mx.fit(inputs="s3://mybucket/train", job_name="new_name") model = mx.create_model(image_uri=custom_hosting_image) - assert model.image == custom_hosting_image + assert model.image_uri == custom_hosting_image def test_mx_enable_sm_metrics(sagemaker_session, mxnet_version, mxnet_py_version): @@ -811,5 +811,5 @@ def test_custom_image_estimator_deploy(sagemaker_session, mxnet_version, mxnet_p train_instance_type=INSTANCE_TYPE, ) mx.fit(inputs="s3://mybucket/train", job_name="new_name") - model = mx.create_model(image=custom_image) - assert model.image == custom_image + model = mx.create_model(image_uri=custom_image) + assert model.image_uri == custom_image diff --git a/tests/unit/test_ntm.py 
b/tests/unit/test_ntm.py index 6fddd4b475..84f5096181 100644 --- a/tests/unit/test_ntm.py +++ b/tests/unit/test_ntm.py @@ -284,7 +284,7 @@ def test_model_image(sagemaker_session): ntm.fit(data, MINI_BATCH_SIZE) model = ntm.create_model() - assert model.image == registry(REGION, "ntm") + "/ntm:1" + assert model.image_uri == registry(REGION, "ntm") + "/ntm:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_object2vec.py b/tests/unit/test_object2vec.py index fa5f79243d..f4cd907673 100644 --- a/tests/unit/test_object2vec.py +++ b/tests/unit/test_object2vec.py @@ -308,7 +308,7 @@ def test_model_image(sagemaker_session): object2vec.fit(data, MINI_BATCH_SIZE) model = object2vec.create_model() - assert model.image == registry(REGION, "object2vec") + "/object2vec:1" + assert model.image_uri == registry(REGION, "object2vec") + "/object2vec:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_pca.py b/tests/unit/test_pca.py index ffa6bafe19..bd3e53e75d 100644 --- a/tests/unit/test_pca.py +++ b/tests/unit/test_pca.py @@ -235,7 +235,7 @@ def test_model_image(sagemaker_session): pca.fit(data, MINI_BATCH_SIZE) model = pca.create_model() - assert model.image == registry(REGION, "pca") + "/pca:1" + assert model.image_uri == registry(REGION, "pca") + "/pca:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_pipeline_model.py b/tests/unit/test_pipeline_model.py index cad40c4cff..0f25bd7d08 100644 --- a/tests/unit/test_pipeline_model.py +++ b/tests/unit/test_pipeline_model.py @@ -30,8 +30,8 @@ ROLE = "some-role" ENV_1 = {"SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT": "application/json"} ENV_2 = {"SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT": "text/csv"} -MODEL_CONTAINER_1 = ModelContainer(image=MODEL_IMAGE_1, model_data=MODEL_DATA_1, env=ENV_1) -MODEL_CONTAINER_2 = ModelContainer(image=MODEL_IMAGE_2, model_data=MODEL_DATA_2, env=ENV_2) +MODEL_CONTAINER_1 = ModelContainer(image_uri=MODEL_IMAGE_1, model_data=MODEL_DATA_1, 
env=ENV_1) +MODEL_CONTAINER_2 = ModelContainer(image_uri=MODEL_IMAGE_2, model_data=MODEL_DATA_2, env=ENV_2) ENDPOINT = "some-ep" diff --git a/tests/unit/test_pytorch.py b/tests/unit/test_pytorch.py index 4057b01758..ba9669bbee 100644 --- a/tests/unit/test_pytorch.py +++ b/tests/unit/test_pytorch.py @@ -114,7 +114,7 @@ def _pytorch_estimator( def _create_train_job(version, py_version): return { - "image": _get_full_cpu_image_uri(version, py_version), + "image_uri": _get_full_cpu_image_uri(version, py_version), "input_mode": "File", "input_config": [ { @@ -262,7 +262,7 @@ def test_create_model_with_custom_image(name_from_base, sagemaker_session): model = pytorch.create_model() assert model.sagemaker_session == sagemaker_session - assert model.image == image + assert model.image_uri == image assert model.entry_point == SCRIPT_PATH assert model.role == ROLE assert model.name == model_name @@ -636,5 +636,5 @@ def test_custom_image_estimator_deploy(sagemaker_session, pytorch_version, pytor sagemaker_session, framework_version=pytorch_version, py_version=pytorch_py_version ) pytorch.fit(inputs="s3://mybucket/train", job_name="new_name") - model = pytorch.create_model(image=custom_image) - assert model.image == custom_image + model = pytorch.create_model(image_uri=custom_image) + assert model.image_uri == custom_image diff --git a/tests/unit/test_randomcutforest.py b/tests/unit/test_randomcutforest.py index d960e45f46..8fc1baf857 100644 --- a/tests/unit/test_randomcutforest.py +++ b/tests/unit/test_randomcutforest.py @@ -232,7 +232,7 @@ def test_model_image(sagemaker_session): randomcutforest.fit(data, MINI_BATCH_SIZE) model = randomcutforest.create_model() - assert model.image == registry(REGION, "randomcutforest") + "/randomcutforest:1" + assert model.image_uri == registry(REGION, "randomcutforest") + "/randomcutforest:1" def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_rl.py b/tests/unit/test_rl.py index a956ea0ddb..7d86614836 100644 --- 
a/tests/unit/test_rl.py +++ b/tests/unit/test_rl.py @@ -114,7 +114,7 @@ def _rl_estimator( def _create_train_job(toolkit, toolkit_version, framework): job_name = "{}-{}-{}".format(IMAGE_URI, framework, TIMESTAMP) return { - "image": _get_full_cpu_image_uri(toolkit, toolkit_version, framework), + "image_uri": _get_full_cpu_image_uri(toolkit, toolkit_version, framework), "input_mode": "File", "input_config": [ { @@ -296,7 +296,7 @@ def test_create_model_with_custom_image(name_from_base, sagemaker_session): model = rl.create_model(entry_point=new_entry_point) assert model.sagemaker_session == sagemaker_session - assert model.image == image + assert model.image_uri == image assert model.entry_point == new_entry_point assert model.role == ROLE assert model.name == model_name @@ -646,5 +646,5 @@ def test_custom_image_estimator_deploy(sagemaker_session): custom_image = "mycustomimage:latest" rl = _rl_estimator(sagemaker_session) rl.fit(inputs="s3://mybucket/train", job_name="new_name") - model = rl.create_model(image=custom_image) - assert model.image == custom_image + model = rl.create_model(image_uri=custom_image) + assert model.image_uri == custom_image diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 103a74af54..36212a7885 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -685,7 +685,7 @@ def test_train_pack_to_request(sagemaker_session): stop_cond = {"MaxRuntimeInSeconds": MAX_TIME} sagemaker_session.train( - image=IMAGE, + image_uri=IMAGE, input_mode="File", input_config=in_config, role=EXPANDED_ROLE, @@ -842,7 +842,7 @@ def assert_create_tuning_job_request(**kwrags): max_parallel_jobs=5, parameter_ranges=SAMPLE_PARAM_RANGES, static_hyperparameters=STATIC_HPs, - image="dummy-image-1", + image_uri="dummy-image-1", input_mode="File", metric_definitions=SAMPLE_METRIC_DEF, role=EXPANDED_ROLE, @@ -888,16 +888,16 @@ def test_create_tuning_job_with_both_training_config_and_list(sagemaker_session) "max_parallel_jobs": 5, 
"parameter_ranges": SAMPLE_PARAM_RANGES, }, - training_config={"static_hyperparameters": STATIC_HPs, "image": "dummy-image-1"}, + training_config={"static_hyperparameters": STATIC_HPs, "image_uri": "dummy-image-1"}, training_config_list=[ { "static_hyperparameters": STATIC_HPs, - "image": "dummy-image-1", + "image_uri": "dummy-image-1", "estimator_name": "estimator_1", }, { "static_hyperparameters": STATIC_HPs_2, - "image": "dummy-image-2", + "image_uri": "dummy-image-2", "estimator_name": "estimator_2", }, ], @@ -930,7 +930,7 @@ def assert_create_tuning_job_request(**kwrags): }, training_config={ "static_hyperparameters": STATIC_HPs, - "image": "dummy-image-1", + "image_uri": "dummy-image-1", "input_mode": "File", "metric_definitions": SAMPLE_METRIC_DEF, "role": EXPANDED_ROLE, @@ -967,7 +967,7 @@ def assert_create_tuning_job_request(**kwrags): training_config_list=[ { "static_hyperparameters": STATIC_HPs, - "image": "dummy-image-1", + "image_uri": "dummy-image-1", "input_mode": "File", "metric_definitions": SAMPLE_METRIC_DEF, "role": EXPANDED_ROLE, @@ -982,7 +982,7 @@ def assert_create_tuning_job_request(**kwrags): }, { "static_hyperparameters": STATIC_HPs_2, - "image": "dummy-image-2", + "image_uri": "dummy-image-2", "input_mode": "File", "metric_definitions": SAMPLE_METRIC_DEF_2, "role": EXPANDED_ROLE, @@ -1023,7 +1023,7 @@ def assert_create_tuning_job_request(**kwrags): max_parallel_jobs=5, parameter_ranges=SAMPLE_PARAM_RANGES, static_hyperparameters=STATIC_HPs, - image="dummy-image-1", + image_uri="dummy-image-1", input_mode="File", metric_definitions=SAMPLE_METRIC_DEF, role=EXPANDED_ROLE, @@ -1058,7 +1058,7 @@ def assert_create_tuning_job_request(**kwrags): max_parallel_jobs=5, parameter_ranges=SAMPLE_PARAM_RANGES, static_hyperparameters=STATIC_HPs, - image="dummy-image-1", + image_uri="dummy-image-1", input_mode="File", metric_definitions=SAMPLE_METRIC_DEF, role=EXPANDED_ROLE, @@ -1101,7 +1101,7 @@ def assert_create_tuning_job_request(**kwargs): 
max_parallel_jobs=5, parameter_ranges=SAMPLE_PARAM_RANGES, static_hyperparameters=STATIC_HPs, - image="dummy-image-1", + image_uri="dummy-image-1", input_mode="File", metric_definitions=SAMPLE_METRIC_DEF, role=EXPANDED_ROLE, @@ -1190,7 +1190,7 @@ def test_train_pack_to_request_with_optional_params(sagemaker_session): hyperparameters = {"foo": "bar"} sagemaker_session.train( - image=IMAGE, + image_uri=IMAGE, input_mode="File", input_config=in_config, role=EXPANDED_ROLE, @@ -1697,7 +1697,7 @@ def test_create_model_from_job_with_tags(sagemaker_session): def test_create_model_from_job_with_image(sagemaker_session): ims = sagemaker_session ims.sagemaker_client.describe_training_job.return_value = COMPLETED_DESCRIBE_JOB_RESULT - ims.create_model_from_job(JOB_NAME, primary_container_image="some-image") + ims.create_model_from_job(JOB_NAME, primary_container_image_uri="some-image") [create_model_call] = ims.sagemaker_client.create_model.call_args_list assert dict(create_model_call[1]["PrimaryContainer"])["Image"] == "some-image" @@ -1706,7 +1706,7 @@ def test_create_model_from_job_with_container_def(sagemaker_session): ims = sagemaker_session ims.sagemaker_client.describe_training_job.return_value = COMPLETED_DESCRIBE_JOB_RESULT ims.create_model_from_job( - JOB_NAME, primary_container_image="some-image", model_data_url="some-data", env={"a": "b"} + JOB_NAME, primary_container_image_uri="some-image", model_data_url="some-data", env={"a": "b"} ) [create_model_call] = ims.sagemaker_client.create_model.call_args_list c_def = create_model_call[1]["PrimaryContainer"] diff --git a/tests/unit/test_sklearn.py b/tests/unit/test_sklearn.py index 845ac2e4bb..9dcda10370 100644 --- a/tests/unit/test_sklearn.py +++ b/tests/unit/test_sklearn.py @@ -98,7 +98,7 @@ def _sklearn_estimator( def _create_train_job(version): return { - "image": _get_full_cpu_image_uri(version), + "image_uri": _get_full_cpu_image_uri(version), "input_mode": "File", "input_config": [ { @@ -264,7 +264,7 @@ def 
test_create_model_with_optional_params(sagemaker_session, sklearn_version): dependencies = ["/directory/a", "/directory/b"] model_name = "model-name" model = sklearn.create_model( - image=custom_image, + image_uri=custom_image, role=new_role, model_server_workers=model_server_workers, vpc_config_override=vpc_config, @@ -274,7 +274,7 @@ def test_create_model_with_optional_params(sagemaker_session, sklearn_version): name=model_name, ) - assert model.image == custom_image + assert model.image_uri == custom_image assert model.role == new_role assert model.model_server_workers == model_server_workers assert model.vpc_config == vpc_config @@ -303,7 +303,7 @@ def test_create_model_with_custom_image(sagemaker_session): sklearn.fit(inputs="s3://mybucket/train", job_name="new_name") model = sklearn.create_model() - assert model.image == custom_image + assert model.image_uri == custom_image @patch("time.strftime", return_value=TIMESTAMP) @@ -598,5 +598,5 @@ def test_custom_image_estimator_deploy(sagemaker_session, sklearn_version): custom_image = "mycustomimage:latest" sklearn = _sklearn_estimator(sagemaker_session, sklearn_version) sklearn.fit(inputs="s3://mybucket/train", job_name="new_name") - model = sklearn.create_model(image=custom_image) - assert model.image == custom_image + model = sklearn.create_model(image_uri=custom_image) + assert model.image_uri == custom_image diff --git a/tests/unit/test_sparkml_serving.py b/tests/unit/test_sparkml_serving.py index fa615d9d43..9499abf8af 100644 --- a/tests/unit/test_sparkml_serving.py +++ b/tests/unit/test_sparkml_serving.py @@ -49,7 +49,7 @@ def sagemaker_session(): def test_sparkml_model(sagemaker_session): sparkml = SparkMLModel(sagemaker_session=sagemaker_session, model_data=MODEL_DATA, role=ROLE) - assert sparkml.image == registry(REGION, "sparkml-serving") + "/sagemaker-sparkml-serving:2.2" + assert sparkml.image_uri == registry(REGION, "sparkml-serving") + "/sagemaker-sparkml-serving:2.2" def 
test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_tuner.py b/tests/unit/test_tuner.py index 12932431c8..fee6e6cdd6 100644 --- a/tests/unit/test_tuner.py +++ b/tests/unit/test_tuner.py @@ -391,7 +391,7 @@ def test_fit_multi_estimators(sagemaker_session): assert training_config_one["objective_type"] == "Minimize" assert training_config_one["objective_metric_name"] == OBJECTIVE_METRIC_NAME assert training_config_one["input_config"] is None - assert training_config_one["image"] == estimator_one.train_image() + assert training_config_one["image_uri"] == estimator_one.train_image() assert training_config_one["metric_definitions"] == METRIC_DEFINITIONS assert ( training_config_one["static_hyperparameters"]["sagemaker_estimator_module"] @@ -408,7 +408,7 @@ def test_fit_multi_estimators(sagemaker_session): assert training_config_two["objective_metric_name"] == OBJECTIVE_METRIC_NAME_TWO assert len(training_config_two["input_config"]) == 1 assert training_config_two["input_config"][0]["DataSource"]["S3DataSource"]["S3Uri"] == INPUTS - assert training_config_two["image"] == estimator_two.train_image() + assert training_config_two["image_uri"] == estimator_two.train_image() assert training_config_two["metric_definitions"] is None assert training_config_two["static_hyperparameters"]["mini_batch_size"] == "4000" _assert_parameter_ranges( diff --git a/tests/unit/test_xgboost.py b/tests/unit/test_xgboost.py index 85b2c2dedb..b8d6b4c765 100644 --- a/tests/unit/test_xgboost.py +++ b/tests/unit/test_xgboost.py @@ -106,7 +106,7 @@ def _xgboost_estimator( def _create_train_job(version, instance_count=1): return { - "image": _get_full_cpu_image_uri(version), + "image_uri": _get_full_cpu_image_uri(version), "input_mode": "File", "input_config": [ { @@ -253,7 +253,7 @@ def test_create_model_with_optional_params(sagemaker_session, xgboost_full_versi dependencies = ["/directory/a", "/directory/b"] model_name = "model-name" model = xgboost.create_model( - 
image=custom_image, + image_uri=custom_image, role=new_role, model_server_workers=model_server_workers, vpc_config_override=vpc_config, @@ -263,7 +263,7 @@ def test_create_model_with_optional_params(sagemaker_session, xgboost_full_versi name=model_name, ) - assert model.image == custom_image + assert model.image_uri == custom_image assert model.role == new_role assert model.model_server_workers == model_server_workers assert model.vpc_config == vpc_config @@ -294,7 +294,7 @@ def test_create_model_with_custom_image(sagemaker_session, xgboost_full_version) xgboost.fit(inputs="s3://mybucket/train", job_name="new_name") model = xgboost.create_model() - assert model.image == custom_image + assert model.image_uri == custom_image @patch("time.strftime", return_value=TIMESTAMP) From 2035b5289aa8f8b5d7f292f00a62b1a3fc23d03f Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Mon, 6 Jul 2020 13:56:50 -0700 Subject: [PATCH 2/5] black format --- tests/unit/test_fm.py | 4 +++- tests/unit/test_session.py | 5 ++++- tests/unit/test_sparkml_serving.py | 4 +++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/unit/test_fm.py b/tests/unit/test_fm.py index 44aae132e5..c93dbc0bba 100644 --- a/tests/unit/test_fm.py +++ b/tests/unit/test_fm.py @@ -313,7 +313,9 @@ def test_model_image(sagemaker_session): fm.fit(data, MINI_BATCH_SIZE) model = fm.create_model() - assert model.image_uri == registry(REGION, "factorization-machines") + "/factorization-machines:1" + assert ( + model.image_uri == registry(REGION, "factorization-machines") + "/factorization-machines:1" + ) def test_predictor_type(sagemaker_session): diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 36212a7885..e82ddc06ec 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -1706,7 +1706,10 @@ def test_create_model_from_job_with_container_def(sagemaker_session): ims = sagemaker_session 
ims.sagemaker_client.describe_training_job.return_value = COMPLETED_DESCRIBE_JOB_RESULT ims.create_model_from_job( - JOB_NAME, primary_container_image_uri="some-image", model_data_url="some-data", env={"a": "b"} + JOB_NAME, + primary_container_image_uri="some-image", + model_data_url="some-data", + env={"a": "b"}, ) [create_model_call] = ims.sagemaker_client.create_model.call_args_list c_def = create_model_call[1]["PrimaryContainer"] diff --git a/tests/unit/test_sparkml_serving.py b/tests/unit/test_sparkml_serving.py index 9499abf8af..efe6afc99b 100644 --- a/tests/unit/test_sparkml_serving.py +++ b/tests/unit/test_sparkml_serving.py @@ -49,7 +49,9 @@ def sagemaker_session(): def test_sparkml_model(sagemaker_session): sparkml = SparkMLModel(sagemaker_session=sagemaker_session, model_data=MODEL_DATA, role=ROLE) - assert sparkml.image_uri == registry(REGION, "sparkml-serving") + "/sagemaker-sparkml-serving:2.2" + assert ( + sparkml.image_uri == registry(REGION, "sparkml-serving") + "/sagemaker-sparkml-serving:2.2" + ) def test_predictor_type(sagemaker_session): From e3b93cb5f926eebfdffc9d557b29f5b7c9ef4ece Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Mon, 6 Jul 2020 14:46:22 -0700 Subject: [PATCH 3/5] primary_container_image_uri/deployment_image_uri --> image_uri --- src/sagemaker/session.py | 16 +++++++--------- tests/unit/test_endpoint_from_job.py | 4 ++-- tests/unit/test_endpoint_from_model_data.py | 6 +++--- tests/unit/test_session.py | 7 ++----- 4 files changed, 14 insertions(+), 19 deletions(-) diff --git a/src/sagemaker/session.py b/src/sagemaker/session.py index 066007f251..8b04403eef 100644 --- a/src/sagemaker/session.py +++ b/src/sagemaker/session.py @@ -2156,7 +2156,7 @@ def create_model_from_job( training_job_name, name=None, role=None, - primary_container_image_uri=None, + image_uri=None, model_data_url=None, env=None, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT, @@ -2171,7 +2171,7 @@ def 
create_model_from_job( role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, specified either by an IAM role name or role ARN. If None, the ``RoleArn`` from the SageMaker Training Job will be used. - primary_container_image_uri (str): The Docker image URI (default: None). If None, it + image_uri (str): The Docker image URI (default: None). If None, it defaults to the training image URI from ``training_job_name``. model_data_url (str): S3 location of the model data (default: None). If None, defaults to the ``ModelS3Artifacts`` of ``training_job_name``. @@ -2194,7 +2194,7 @@ def create_model_from_job( role = role or training_job["RoleArn"] env = env or {} primary_container = container_def( - primary_container_image_uri or training_job["AlgorithmSpecification"]["TrainingImage"], + image_uri or training_job["AlgorithmSpecification"]["TrainingImage"], model_data_url=model_data_url or training_job["ModelArtifacts"]["S3ModelArtifacts"], env=env, ) @@ -2700,7 +2700,7 @@ def endpoint_from_job( job_name, initial_instance_count, instance_type, - deployment_image_uri=None, + image_uri=None, name=None, role=None, wait=True, @@ -2725,7 +2725,7 @@ def endpoint_from_job( autoscaling. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, for example, 'ml.c4.xlarge'. - deployment_image_uri (str): The Docker image which defines the inference code to be used + image_uri (str): The Docker image which defines the inference code to be used as the entry point for accepting prediction requests. If not specified, uses the image used for the training job. name (str): Name of the ``Endpoint`` to create. 
If not specified, uses the training job @@ -2755,16 +2755,14 @@ def endpoint_from_job( """ job_desc = self.sagemaker_client.describe_training_job(TrainingJobName=job_name) output_url = job_desc["ModelArtifacts"]["S3ModelArtifacts"] - deployment_image_uri = ( - deployment_image_uri or job_desc["AlgorithmSpecification"]["TrainingImage"] - ) + image_uri = image_uri or job_desc["AlgorithmSpecification"]["TrainingImage"] role = role or job_desc["RoleArn"] name = name or job_name vpc_config_override = _vpc_config_from_training_job(job_desc, vpc_config_override) return self.endpoint_from_model_data( model_s3_location=output_url, - deployment_image_uri=deployment_image_uri, + image_uri=image_uri, initial_instance_count=initial_instance_count, instance_type=instance_type, name=name, diff --git a/tests/unit/test_endpoint_from_job.py b/tests/unit/test_endpoint_from_job.py index 189d103cbe..4b30afd245 100644 --- a/tests/unit/test_endpoint_from_job.py +++ b/tests/unit/test_endpoint_from_job.py @@ -67,7 +67,7 @@ def test_all_defaults_no_existing_entities(sagemaker_session): expected_args = original_args.copy() expected_args.pop("job_name") expected_args["model_s3_location"] = S3_MODEL_ARTIFACTS - expected_args["deployment_image_uri"] = IMAGE + expected_args["image_uri"] = IMAGE expected_args["role"] = TRAIN_ROLE expected_args["name"] = JOB_NAME expected_args["model_environment_vars"] = None @@ -85,7 +85,7 @@ def test_no_defaults_no_existing_entities(sagemaker_session): "job_name": JOB_NAME, "initial_instance_count": INITIAL_INSTANCE_COUNT, "instance_type": INSTANCE_TYPE, - "deployment_image_uri": DEPLOY_IMAGE, + "image_uri": DEPLOY_IMAGE, "role": DEPLOY_ROLE, "name": NEW_ENTITY_NAME, "model_environment_vars": ENV_VARS, diff --git a/tests/unit/test_endpoint_from_model_data.py b/tests/unit/test_endpoint_from_model_data.py index 8e0b1833e7..9e480c9077 100644 --- a/tests/unit/test_endpoint_from_model_data.py +++ b/tests/unit/test_endpoint_from_model_data.py @@ -56,7 +56,7 @@ def 
sagemaker_session(): def test_all_defaults_no_existing_entities(name_from_image_mock, sagemaker_session): returned_name = sagemaker_session.endpoint_from_model_data( model_s3_location=S3_MODEL_ARTIFACTS, - deployment_image_uri=DEPLOY_IMAGE, + image_uri=DEPLOY_IMAGE, initial_instance_count=INITIAL_INSTANCE_COUNT, instance_type=INSTANCE_TYPE, role=DEPLOY_ROLE, @@ -96,7 +96,7 @@ def test_no_defaults_no_existing_entities(name_from_image_mock, sagemaker_sessio returned_name = sagemaker_session.endpoint_from_model_data( model_s3_location=S3_MODEL_ARTIFACTS, - deployment_image_uri=DEPLOY_IMAGE, + image_uri=DEPLOY_IMAGE, initial_instance_count=INITIAL_INSTANCE_COUNT, instance_type=INSTANCE_TYPE, role=DEPLOY_ROLE, @@ -145,7 +145,7 @@ def test_model_and_endpoint_config_exist(name_from_image_mock, sagemaker_session sagemaker_session.endpoint_from_model_data( model_s3_location=S3_MODEL_ARTIFACTS, - deployment_image_uri=DEPLOY_IMAGE, + image_uri=DEPLOY_IMAGE, initial_instance_count=INITIAL_INSTANCE_COUNT, instance_type=INSTANCE_TYPE, wait=False, diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index e82ddc06ec..db54ee4aed 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -1697,7 +1697,7 @@ def test_create_model_from_job_with_tags(sagemaker_session): def test_create_model_from_job_with_image(sagemaker_session): ims = sagemaker_session ims.sagemaker_client.describe_training_job.return_value = COMPLETED_DESCRIBE_JOB_RESULT - ims.create_model_from_job(JOB_NAME, primary_container_image_uri="some-image") + ims.create_model_from_job(JOB_NAME, image_uri="some-image") [create_model_call] = ims.sagemaker_client.create_model.call_args_list assert dict(create_model_call[1]["PrimaryContainer"])["Image"] == "some-image" @@ -1706,10 +1706,7 @@ def test_create_model_from_job_with_container_def(sagemaker_session): ims = sagemaker_session ims.sagemaker_client.describe_training_job.return_value = COMPLETED_DESCRIBE_JOB_RESULT 
ims.create_model_from_job( - JOB_NAME, - primary_container_image_uri="some-image", - model_data_url="some-data", - env={"a": "b"}, + JOB_NAME, image_uri="some-image", model_data_url="some-data", env={"a": "b"}, ) [create_model_call] = ims.sagemaker_client.create_model.call_args_list c_def = create_model_call[1]["PrimaryContainer"] From 495837da20a1818f6560c5283ba51919cf5c4591 Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Mon, 6 Jul 2020 14:52:08 -0700 Subject: [PATCH 4/5] rename one more deployment_image_uri --- src/sagemaker/session.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/sagemaker/session.py b/src/sagemaker/session.py index 8b04403eef..5ae284a08b 100644 --- a/src/sagemaker/session.py +++ b/src/sagemaker/session.py @@ -2777,7 +2777,7 @@ def endpoint_from_job( def endpoint_from_model_data( self, model_s3_location, - deployment_image_uri, + image_uri, initial_instance_count, instance_type, name=None, @@ -2792,7 +2792,7 @@ def endpoint_from_model_data( Args: model_s3_location (str): S3 URI of the model artifacts to use for the endpoint. - deployment_image_uri (str): The Docker image URI which defines the runtime code to be + image_uri (str): The Docker image URI which defines the runtime code to be used as the entry point for accepting prediction requests. initial_instance_count (int): Minimum number of EC2 instances to launch. 
The actual number of active instances for an endpoint at any given time varies due to @@ -2824,7 +2824,7 @@ def endpoint_from_model_data( """ model_environment_vars = model_environment_vars or {} - name = name or name_from_image(deployment_image_uri) + name = name or name_from_image(image_uri) model_vpc_config = vpc_utils.sanitize(model_vpc_config) if _deployment_entity_exists( @@ -2838,7 +2838,7 @@ def endpoint_from_model_data( lambda: self.sagemaker_client.describe_model(ModelName=name) ): primary_container = container_def( - image_uri=deployment_image_uri, + image_uri=image_uri, model_data_url=model_s3_location, env=model_environment_vars, ) From 1c75a4d2680ca52424df29fe748b7b5bc58c59cc Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Mon, 6 Jul 2020 17:18:49 -0700 Subject: [PATCH 5/5] black format --- src/sagemaker/session.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/sagemaker/session.py b/src/sagemaker/session.py index 5ae284a08b..df7beab82c 100644 --- a/src/sagemaker/session.py +++ b/src/sagemaker/session.py @@ -2838,9 +2838,7 @@ def endpoint_from_model_data( lambda: self.sagemaker_client.describe_model(ModelName=name) ): primary_container = container_def( - image_uri=image_uri, - model_data_url=model_s3_location, - env=model_environment_vars, + image_uri=image_uri, model_data_url=model_s3_location, env=model_environment_vars, ) self.create_model( name=name, role=role, container_defs=primary_container, vpc_config=model_vpc_config