From ba4852187749094869c5a2d251086f1ed76f0a33 Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Mon, 18 May 2020 14:38:07 -0700 Subject: [PATCH 1/7] breaking: remove legacy TensorFlowModel and TensorFlowPredictor classes This change also removes the associated serialization/deserialization code used by TensorFlowPredictor and the locally copied TFS APIs. --- src/sagemaker/cli/tensorflow.py | 5 +- src/sagemaker/tensorflow/__init__.py | 18 +- src/sagemaker/tensorflow/estimator.py | 238 +--------- src/sagemaker/tensorflow/model.py | 183 -------- src/sagemaker/tensorflow/predictor.py | 189 -------- .../tensorflow/tensorflow_serving/__init__.py | 0 .../tensorflow_serving/apis/__init__.py | 0 .../apis/classification_pb2.py | 361 --------------- .../apis/get_model_metadata_pb2.py | 391 ---------------- .../tensorflow_serving/apis/inference_pb2.py | 361 --------------- .../tensorflow_serving/apis/input_pb2.py | 256 ----------- .../tensorflow_serving/apis/model_pb2.py | 126 ----- .../apis/model_service_pb2.py | 221 --------- .../apis/model_service_pb2_grpc.py | 72 --- .../tensorflow_serving/apis/predict_pb2.py | 354 -------------- .../apis/prediction_service_pb2.py | 431 ------------------ .../tensorflow_serving/apis/regression_pb2.py | 291 ------------ tests/unit/test_tf_estimator.py | 50 +- tests/unit/test_tf_predictor.py | 415 ----------------- tests/unit/test_tfs.py | 4 +- 20 files changed, 22 insertions(+), 3944 deletions(-) delete mode 100644 src/sagemaker/tensorflow/model.py delete mode 100644 src/sagemaker/tensorflow/predictor.py delete mode 100644 src/sagemaker/tensorflow/tensorflow_serving/__init__.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/__init__.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/classification_pb2.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/get_model_metadata_pb2.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/inference_pb2.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/input_pb2.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/model_pb2.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2_grpc.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/predict_pb2.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/prediction_service_pb2.py delete mode 100755 src/sagemaker/tensorflow/tensorflow_serving/apis/regression_pb2.py delete mode 100644 tests/unit/test_tf_predictor.py diff --git a/src/sagemaker/cli/tensorflow.py b/src/sagemaker/cli/tensorflow.py index 61af24f1ca..ae286aa2fe 100644 --- a/src/sagemaker/cli/tensorflow.py +++ b/src/sagemaker/cli/tensorflow.py @@ -68,13 +68,12 @@ def create_model(self, model_url): Args: model_url: """ - from sagemaker.tensorflow.model import TensorFlowModel + from sagemaker.tensorflow.serving import Model - return TensorFlowModel( + return Model( model_data=model_url, role=self.role_name, entry_point=self.script, - py_version=self.python, name=self.endpoint_name, env=self.environment, ) diff --git a/src/sagemaker/tensorflow/__init__.py b/src/sagemaker/tensorflow/__init__.py index b39c96898e..8701c9eb85 100644 --- a/src/sagemaker/tensorflow/__init__.py +++ b/src/sagemaker/tensorflow/__init__.py @@ -10,21 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, 
either express or implied. See the License for the specific # language governing permissions and limitations under the License. -"""Placeholder docstring""" +"""Classes for using TensorFlow and TensorFlow Serving with Amazon SageMaker.""" from __future__ import absolute_import -import sys -import os - -# Hack to use our local copy of tensorflow_serving.apis, which contains the protobuf-generated -# classes for tensorflow serving. Currently tensorflow_serving_api can only be pip-installed for -# python 2. -sys.path.append(os.path.dirname(__file__)) - -from sagemaker.tensorflow.estimator import ( # noqa: E402, F401 # pylint: disable=wrong-import-position - TensorFlow, -) -from sagemaker.tensorflow.model import ( # noqa: E402, F401 # pylint: disable=wrong-import-position - TensorFlowModel, - TensorFlowPredictor, -) +from sagemaker.tensorflow.estimator import TensorFlow # noqa: F401 (imported but unused) diff --git a/src/sagemaker/tensorflow/estimator.py b/src/sagemaker/tensorflow/estimator.py index d83d9a1a08..f14a3d2d9f 100644 --- a/src/sagemaker/tensorflow/estimator.py +++ b/src/sagemaker/tensorflow/estimator.py @@ -23,9 +23,7 @@ from sagemaker.estimator import Framework import sagemaker.fw_utils as fw from sagemaker.tensorflow import defaults -from sagemaker.tensorflow.model import TensorFlowModel from sagemaker.tensorflow.serving import Model -from sagemaker.transformer import Transformer from sagemaker.vpc_utils import VPC_CONFIG_DEFAULT logger = logging.getLogger("sagemaker") @@ -252,10 +250,8 @@ def _prepare_init_params_from_job_description(cls, job_details, model_channel_na def create_model( self, - model_server_workers=None, role=None, vpc_config_override=VPC_CONFIG_DEFAULT, - endpoint_type=None, entry_point=None, source_dir=None, dependencies=None, @@ -266,43 +262,25 @@ def create_model( Args: role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also - used during transform jobs. If not specified, the role from the Estimator will be - used. - model_server_workers (int): Optional. The number of worker processes used by the - inference server. If None, server will use one worker per vCPU. + used during transform jobs. If not specified, the role from the Estimator is used. vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the - model. - Default: use subnets and security groups from this Estimator. + model. Default: use subnets and security groups from this Estimator. + * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. - endpoint_type (str): Optional. Selects the software stack used by the inference server. - If not specified, the model will be configured to use the default - SageMaker model server. If 'tensorflow-serving', the model will be configured to - use the SageMaker Tensorflow Serving container. + entry_point (str): Path (absolute or relative) to the local Python source file which - should be executed as the entry point to training. If not specified and - ``endpoint_type`` is 'tensorflow-serving', no entry point is used. If - ``endpoint_type`` is also ``None``, then the training entry point is used. + should be executed as the entry point to training (default: None). source_dir (str): Path (absolute or relative) to a directory with any other serving - source code dependencies aside from the entry point file. If not specified and - ``endpoint_type`` is 'tensorflow-serving', no source_dir is used. 
If - ``endpoint_type`` is also ``None``, then the model source directory from training - is used. + source code dependencies aside from the entry point file (default: None). dependencies (list[str]): A list of paths to directories (absolute or relative) with - any additional libraries that will be exported to the container. - If not specified and ``endpoint_type`` is 'tensorflow-serving', ``dependencies`` is - set to ``None``. - If ``endpoint_type`` is also ``None``, then the dependencies from training are used. - **kwargs: Additional kwargs passed to :class:`~sagemaker.tensorflow.serving.Model` - and :class:`~sagemaker.tensorflow.model.TensorFlowModel` constructors. + any additional libraries that will be exported to the container (default: None). + **kwargs: Additional kwargs passed to :class:`~sagemaker.tensorflow.serving.Model`. Returns: - sagemaker.tensorflow.model.TensorFlowModel or sagemaker.tensorflow.serving.Model: A - ``Model`` object. See :class:`~sagemaker.tensorflow.serving.Model` or - :class:`~sagemaker.tensorflow.model.TensorFlowModel` for full details. + sagemaker.tensorflow.serving.Model: A ``Model`` object. + See :class:`~sagemaker.tensorflow.serving.Model` for full details. """ - role = role or self.role - if "image" not in kwargs: kwargs["image"] = self.image_name @@ -312,41 +290,11 @@ def create_model( if "enable_network_isolation" not in kwargs: kwargs["enable_network_isolation"] = self.enable_network_isolation() - if endpoint_type == "tensorflow-serving" or self._script_mode_enabled: - return self._create_tfs_model( - role=role, - vpc_config_override=vpc_config_override, - entry_point=entry_point, - source_dir=source_dir, - dependencies=dependencies, - **kwargs - ) - - return self._create_default_model( - model_server_workers=model_server_workers, - role=role, - vpc_config_override=vpc_config_override, - entry_point=entry_point, - source_dir=source_dir, - dependencies=dependencies, - **kwargs - ) - - def _create_tfs_model( - self, - role=None, - vpc_config_override=VPC_CONFIG_DEFAULT, - entry_point=None, - source_dir=None, - dependencies=None, - **kwargs - ): - """Placeholder docstring""" return Model( model_data=self.model_data, - role=role, + role=role or self.role, container_log_level=self.container_log_level, - framework_version=utils.get_short_version(self.framework_version), + framework_version=self.framework_version, sagemaker_session=self.sagemaker_session, vpc_config=self.get_vpc_config(vpc_config_override), entry_point=entry_point, @@ -355,34 +303,6 @@ def _create_tfs_model( **kwargs ) - def _create_default_model( - self, - model_server_workers, - role, - vpc_config_override, - entry_point=None, - source_dir=None, - dependencies=None, - **kwargs - ): - """Placeholder docstring""" - return TensorFlowModel( - self.model_data, - role, - entry_point or self.entry_point, - source_dir=source_dir or self._model_source_dir(), - enable_cloudwatch_metrics=self.enable_cloudwatch_metrics, - container_log_level=self.container_log_level, - code_location=self.code_location, - py_version=self.py_version, - framework_version=self.framework_version, - model_server_workers=model_server_workers, - sagemaker_session=self.sagemaker_session, - vpc_config=self.get_vpc_config(vpc_config_override), - dependencies=dependencies or self.dependencies, - **kwargs - ) - def hyperparameters(self): """Return hyperparameters used by your custom TensorFlow code during model training.""" hyperparameters = super(TensorFlow, self).hyperparameters() @@ -464,137 +384,3 @@ def train_image(self): ) 
return super(TensorFlow, self).train_image() - - def transformer( - self, - instance_count, - instance_type, - strategy=None, - assemble_with=None, - output_path=None, - output_kms_key=None, - accept=None, - env=None, - max_concurrent_transforms=None, - max_payload=None, - tags=None, - role=None, - model_server_workers=None, - volume_kms_key=None, - endpoint_type=None, - entry_point=None, - vpc_config_override=VPC_CONFIG_DEFAULT, - enable_network_isolation=None, - model_name=None, - ): - """Return a ``Transformer`` that uses a SageMaker Model based on the training job. It - reuses the SageMaker Session and base job name used by the Estimator. - - Args: - instance_count (int): Number of EC2 instances to use. - instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'. - strategy (str): The strategy used to decide how to batch records in a single request - (default: None). Valid values: 'MultiRecord' and 'SingleRecord'. - assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' - or 'None'. - output_path (str): S3 location for saving the transform result. If not specified, - results are stored to a default bucket. - output_kms_key (str): Optional. KMS key ID for encrypting the transform output - (default: None). - accept (str): The accept header passed by the client to - the inference endpoint. If it is supported by the endpoint, - it will be the format of the batch transform output. - env (dict): Environment variables to be set for use during the transform job - (default: None). - max_concurrent_transforms (int): The maximum number of HTTP requests to be made to - each individual transform container at one time. - max_payload (int): Maximum size of the payload in a single HTTP request to the - container in MB. - tags (list[dict]): List of tags for labeling a transform job. If none specified, then - the tags used for the training job are used for the transform job. - role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also - used during transform jobs. If not specified, the role from the Estimator will be - used. - model_server_workers (int): Optional. The number of worker processes used by the - inference server. If None, server will use one worker per vCPU. - volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML - compute instance (default: None). - endpoint_type (str): Optional. Selects the software stack used by the inference server. - If not specified, the model will be configured to use the default - SageMaker model server. - If 'tensorflow-serving', the model will be configured to - use the SageMaker Tensorflow Serving container. - entry_point (str): Path (absolute or relative) to the local Python source file which - should be executed as the entry point to training. If not specified and - ``endpoint_type`` is 'tensorflow-serving', no entry point is used. If - ``endpoint_type`` is also ``None``, then the training entry point is used. - vpc_config_override (dict[str, list[str]]): Optional override for - the VpcConfig set on the model. - Default: use subnets and security groups from this Estimator. - - * 'Subnets' (list[str]): List of subnet ids. - * 'SecurityGroupIds' (list[str]): List of security group ids. - - enable_network_isolation (bool): Specifies whether container will - run in network isolation mode. Network isolation mode restricts - the container access to outside networks (such as the internet). - The container does not make any inbound or outbound network - calls. 
If True, a channel named "code" will be created for any - user entry script for inference. Also known as Internet-free mode. - If not specified, this setting is taken from the estimator's - current configuration. - model_name (str): Name to use for creating an Amazon SageMaker - model. If not specified, the name of the training job is used. - """ - role = role or self.role - - if self.latest_training_job is None: - logging.warning( - "No finished training job found associated with this estimator. Please make sure " - "this estimator is only used for building workflow config" - ) - return Transformer( - model_name or self._current_job_name, - instance_count, - instance_type, - strategy=strategy, - assemble_with=assemble_with, - output_path=output_path, - output_kms_key=output_kms_key, - accept=accept, - max_concurrent_transforms=max_concurrent_transforms, - max_payload=max_payload, - env=env or {}, - tags=tags, - base_transform_job_name=self.base_job_name, - volume_kms_key=volume_kms_key, - sagemaker_session=self.sagemaker_session, - ) - - if enable_network_isolation is None: - enable_network_isolation = self.enable_network_isolation() - - model = self.create_model( - model_server_workers=model_server_workers, - role=role, - vpc_config_override=vpc_config_override, - endpoint_type=endpoint_type, - entry_point=entry_point, - enable_network_isolation=enable_network_isolation, - name=model_name, - ) - - return model.transformer( - instance_count, - instance_type, - strategy=strategy, - assemble_with=assemble_with, - output_path=output_path, - output_kms_key=output_kms_key, - accept=accept, - env=env, - max_concurrent_transforms=max_concurrent_transforms, - max_payload=max_payload, - tags=tags, - volume_kms_key=volume_kms_key, - ) diff --git a/src/sagemaker/tensorflow/model.py b/src/sagemaker/tensorflow/model.py deleted file mode 100644 index 5e7984f5b3..0000000000 --- a/src/sagemaker/tensorflow/model.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. -"""Placeholder docstring""" -from __future__ import absolute_import - -import logging - -import sagemaker -from sagemaker.fw_utils import ( - create_image_uri, - model_code_key_prefix, - python_deprecation_warning, - empty_framework_version_warning, -) -from sagemaker.model import FrameworkModel, MODEL_SERVER_WORKERS_PARAM_NAME -from sagemaker.predictor import RealTimePredictor -from sagemaker.tensorflow import defaults -from sagemaker.tensorflow.predictor import tf_json_serializer, tf_json_deserializer - -logger = logging.getLogger("sagemaker") - - -class TensorFlowPredictor(RealTimePredictor): - """A ``RealTimePredictor`` for inference against TensorFlow endpoint. - - This is able to serialize Python lists, dictionaries, and numpy arrays to - multidimensional tensors for inference - """ - - def __init__(self, endpoint_name, sagemaker_session=None): - """Initialize an ``TensorFlowPredictor``. - - Args: - endpoint_name (str): The name of the endpoint to perform inference - on. 
- sagemaker_session (sagemaker.session.Session): Session object which - manages interactions with Amazon SageMaker APIs and any other - AWS services needed. If not specified, the estimator creates one - using the default AWS configuration chain. - """ - super(TensorFlowPredictor, self).__init__( - endpoint_name, sagemaker_session, tf_json_serializer, tf_json_deserializer - ) - - -class TensorFlowModel(FrameworkModel): - """Placeholder docstring""" - - __framework_name__ = "tensorflow" - - def __init__( - self, - model_data, - role, - entry_point, - image=None, - py_version="py2", - framework_version=None, - predictor_cls=TensorFlowPredictor, - model_server_workers=None, - **kwargs - ): - """Initialize an TensorFlowModel. - - Args: - model_data (str): The S3 location of a SageMaker model data - ``.tar.gz`` file. - role (str): An AWS IAM role (either name or full ARN). The Amazon - SageMaker training jobs and APIs that create Amazon SageMaker - endpoints use this role to access training data and model - artifacts. After the endpoint is created, the inference code - might use the IAM role, if it needs to access an AWS resource. - entry_point (str): Path (absolute or relative) to the Python source - file which should be executed as the entry point to model - hosting. This should be compatible with either Python 2.7 or - Python 3.5. - image (str): A Docker image URI (default: None). If not specified, a - default image for TensorFlow will be used. - py_version (str): Python version you want to use for executing your - model training code (default: 'py2'). - framework_version (str): TensorFlow version you want to use for - executing your model training code. - predictor_cls (callable[str, sagemaker.session.Session]): A function - to call to create a predictor with an endpoint name and - SageMaker ``Session``. If specified, ``deploy()`` returns the - result of invoking this function on the created endpoint name. - model_server_workers (int): Optional. The number of worker processes - used by the inference server. If None, server will use one - worker per vCPU. - **kwargs: Keyword arguments passed to the ``FrameworkModel`` - initializer. - - .. tip:: - - You can find additional parameters for initializing this class at - :class:`~sagemaker.model.FrameworkModel` and - :class:`~sagemaker.model.Model`. - """ - super(TensorFlowModel, self).__init__( - model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs - ) - - if py_version == "py2": - logger.warning( - python_deprecation_warning(self.__framework_name__, defaults.LATEST_PY2_VERSION) - ) - - if framework_version is None: - logger.warning( - empty_framework_version_warning(defaults.TF_VERSION, defaults.LATEST_VERSION) - ) - - self.py_version = py_version - self.framework_version = framework_version or defaults.TF_VERSION - self.model_server_workers = model_server_workers - - def prepare_container_def(self, instance_type, accelerator_type=None): - """Return a container definition with framework configuration set in - model environment variables. - - This also uploads user-supplied code to S3. - - Args: - instance_type (str): The EC2 instance type to deploy this Model to. - For example, 'ml.p2.xlarge'. - accelerator_type (str): The Elastic Inference accelerator type to - deploy to the instance for loading and making inferences to the - model. For example, 'ml.eia1.medium'. - - Returns: - dict[str, str]: A container definition object usable with the - CreateModel API. 
- """ - deploy_image = self.image - if not deploy_image: - region_name = self.sagemaker_session.boto_region_name - deploy_image = self.serving_image_uri( - region_name, instance_type, accelerator_type=accelerator_type - ) - - deploy_key_prefix = model_code_key_prefix(self.key_prefix, self.name, deploy_image) - self._upload_code(deploy_key_prefix) - deploy_env = dict(self.env) - deploy_env.update(self._framework_env_vars()) - - if self.model_server_workers: - deploy_env[MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(self.model_server_workers) - - return sagemaker.container_def(deploy_image, self.model_data, deploy_env) - - def serving_image_uri(self, region_name, instance_type, accelerator_type=None): - """Create a URI for the serving image. - - Args: - region_name (str): AWS region where the image is uploaded. - instance_type (str): SageMaker instance type. Used to determine device type - (cpu/gpu/family-specific optimized). - accelerator_type (str): The Elastic Inference accelerator type to - deploy to the instance for loading and making inferences to the - model (default: None). For example, 'ml.eia1.medium'. - - Returns: - str: The appropriate image URI based on the given parameters. - - """ - return create_image_uri( - region_name, - self.__framework_name__, - instance_type, - self.framework_version, - self.py_version, - accelerator_type=accelerator_type, - ) diff --git a/src/sagemaker/tensorflow/predictor.py b/src/sagemaker/tensorflow/predictor.py deleted file mode 100644 index 1b6c86810c..0000000000 --- a/src/sagemaker/tensorflow/predictor.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. -"""Placeholder docstring""" -from __future__ import absolute_import - -import json - -import google.protobuf.json_format as json_format -from google.protobuf.message import DecodeError -from protobuf_to_dict import protobuf_to_dict -from sagemaker.content_types import CONTENT_TYPE_JSON, CONTENT_TYPE_OCTET_STREAM, CONTENT_TYPE_CSV -from sagemaker.predictor import json_serializer, csv_serializer - - -def _possible_responses(): - """ - Returns: Possible available request types. - """ - from tensorflow.core.framework import tensor_pb2 # pylint: disable=no-name-in-module - from tensorflow_serving.apis import ( - predict_pb2, - classification_pb2, - inference_pb2, - regression_pb2, - ) - - return [ - predict_pb2.PredictResponse, - classification_pb2.ClassificationResponse, - inference_pb2.MultiInferenceResponse, - regression_pb2.RegressionResponse, - tensor_pb2.TensorProto, - ] - - -REGRESSION_REQUEST = "RegressionRequest" -MULTI_INFERENCE_REQUEST = "MultiInferenceRequest" -CLASSIFICATION_REQUEST = "ClassificationRequest" -PREDICT_REQUEST = "PredictRequest" - - -class _TFProtobufSerializer(object): - """Placeholder docstring""" - - def __init__(self): - self.content_type = CONTENT_TYPE_OCTET_STREAM - - def __call__(self, data): - # isinstance does not work here because a same protobuf message can be imported from a - # different module. 
For example sagemaker.tensorflow.tensorflow_serving.regression_pb2 and - # tensorflow_serving.apis.regression_pb2 - """ - Args: - data: - """ - predict_type = data.__class__.__name__ - - available_requests = [ - PREDICT_REQUEST, - CLASSIFICATION_REQUEST, - MULTI_INFERENCE_REQUEST, - REGRESSION_REQUEST, - ] - - if predict_type not in available_requests: - raise ValueError("request type {} is not supported".format(predict_type)) - return data.SerializeToString() - - -tf_serializer = _TFProtobufSerializer() - - -class _TFProtobufDeserializer(object): - """Placeholder docstring""" - - def __init__(self): - """Placeholder docstring""" - self.accept = CONTENT_TYPE_OCTET_STREAM - - def __call__(self, stream, content_type): - """ - Args: - stream: - content_type: - """ - try: - data = stream.read() - finally: - stream.close() - - for possible_response in _possible_responses(): - try: - response = possible_response() - response.ParseFromString(data) - return response - except (UnicodeDecodeError, DecodeError): - # given that the payload does not have the response type, there no way to infer - # the response without keeping state, so I'm iterating all the options. - pass - raise ValueError("data is not in the expected format") - - -tf_deserializer = _TFProtobufDeserializer() - - -class _TFJsonSerializer(object): - """Placeholder docstring""" - - def __init__(self): - self.content_type = CONTENT_TYPE_JSON - - def __call__(self, data): - """ - Args: - data: - """ - - from tensorflow.core.framework import tensor_pb2 # pylint: disable=no-name-in-module - - if isinstance(data, tensor_pb2.TensorProto): - return json_format.MessageToJson(data) - return json_serializer(data) - - -tf_json_serializer = _TFJsonSerializer() - - -class _TFJsonDeserializer(object): - """Placeholder docstring""" - - def __init__(self): - self.accept = CONTENT_TYPE_JSON - - def __call__(self, stream, content_type): - """ - Args: - stream: - content_type: - """ - try: - data = stream.read() - finally: - stream.close() - - for possible_response in _possible_responses(): - try: - return protobuf_to_dict(json_format.Parse(data, possible_response())) - except (UnicodeDecodeError, DecodeError, json_format.ParseError): - # given that the payload does not have the response type, there no way to infer - # the response without keeping state, so I'm iterating all the options. 
- pass - return json.loads(data.decode()) - - -tf_json_deserializer = _TFJsonDeserializer() - - -class _TFCsvSerializer(object): - """Placeholder docstring""" - - def __init__(self): - self.content_type = CONTENT_TYPE_CSV - - def __call__(self, data): - """ - Args: - data: - """ - to_serialize = data - - from tensorflow.core.framework import tensor_pb2 # pylint: disable=no-name-in-module - from tensorflow.python.framework import tensor_util # pylint: disable=no-name-in-module - - if isinstance(data, tensor_pb2.TensorProto): - to_serialize = tensor_util.MakeNdarray(data) - return csv_serializer(to_serialize) - - -tf_csv_serializer = _TFCsvSerializer() diff --git a/src/sagemaker/tensorflow/tensorflow_serving/__init__.py b/src/sagemaker/tensorflow/tensorflow_serving/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/__init__.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/__init__.py deleted file mode 100755 index e69de29bb2..0000000000 diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/classification_pb2.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/classification_pb2.py deleted file mode 100755 index 89e4558b23..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/classification_pb2.py +++ /dev/null @@ -1,361 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: tensorflow_serving/apis/classification.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from tensorflow_serving.apis import input_pb2 as tensorflow__serving_dot_apis_dot_input__pb2 -from tensorflow_serving.apis import model_pb2 as tensorflow__serving_dot_apis_dot_model__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="tensorflow_serving/apis/classification.proto", - package="tensorflow.serving", - syntax="proto3", - serialized_pb=_b( - '\n,tensorflow_serving/apis/classification.proto\x12\x12tensorflow.serving\x1a#tensorflow_serving/apis/input.proto\x1a#tensorflow_serving/apis/model.proto"%\n\x05\x43lass\x12\r\n\x05label\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02"=\n\x0f\x43lassifications\x12*\n\x07\x63lasses\x18\x01 \x03(\x0b\x32\x19.tensorflow.serving.Class"T\n\x14\x43lassificationResult\x12<\n\x0f\x63lassifications\x18\x01 \x03(\x0b\x32#.tensorflow.serving.Classifications"t\n\x15\x43lassificationRequest\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12(\n\x05input\x18\x02 \x01(\x0b\x32\x19.tensorflow.serving.Input"\x85\x01\n\x16\x43lassificationResponse\x12\x31\n\nmodel_spec\x18\x02 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12\x38\n\x06result\x18\x01 \x01(\x0b\x32(.tensorflow.serving.ClassificationResultB\x03\xf8\x01\x01\x62\x06proto3' - ), - dependencies=[ - tensorflow__serving_dot_apis_dot_input__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_model__pb2.DESCRIPTOR, - ], -) - - -_CLASS = _descriptor.Descriptor( - name="Class", - full_name="tensorflow.serving.Class", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="label", - full_name="tensorflow.serving.Class.label", - index=0, 
- number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="score", - full_name="tensorflow.serving.Class.score", - index=1, - number=2, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=142, - serialized_end=179, -) - - -_CLASSIFICATIONS = _descriptor.Descriptor( - name="Classifications", - full_name="tensorflow.serving.Classifications", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="classes", - full_name="tensorflow.serving.Classifications.classes", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=181, - serialized_end=242, -) - - -_CLASSIFICATIONRESULT = _descriptor.Descriptor( - name="ClassificationResult", - full_name="tensorflow.serving.ClassificationResult", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="classifications", - full_name="tensorflow.serving.ClassificationResult.classifications", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=244, - serialized_end=328, -) - - -_CLASSIFICATIONREQUEST = _descriptor.Descriptor( - name="ClassificationRequest", - full_name="tensorflow.serving.ClassificationRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.ClassificationRequest.model_spec", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="input", - full_name="tensorflow.serving.ClassificationRequest.input", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=330, - serialized_end=446, -) - - 
-_CLASSIFICATIONRESPONSE = _descriptor.Descriptor( - name="ClassificationResponse", - full_name="tensorflow.serving.ClassificationResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.ClassificationResponse.model_spec", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="result", - full_name="tensorflow.serving.ClassificationResponse.result", - index=1, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=449, - serialized_end=582, -) - -_CLASSIFICATIONS.fields_by_name["classes"].message_type = _CLASS -_CLASSIFICATIONRESULT.fields_by_name["classifications"].message_type = _CLASSIFICATIONS -_CLASSIFICATIONREQUEST.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_CLASSIFICATIONREQUEST.fields_by_name[ - "input" -].message_type = tensorflow__serving_dot_apis_dot_input__pb2._INPUT -_CLASSIFICATIONRESPONSE.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_CLASSIFICATIONRESPONSE.fields_by_name["result"].message_type = _CLASSIFICATIONRESULT -DESCRIPTOR.message_types_by_name["Class"] = _CLASS -DESCRIPTOR.message_types_by_name["Classifications"] = _CLASSIFICATIONS -DESCRIPTOR.message_types_by_name["ClassificationResult"] = _CLASSIFICATIONRESULT -DESCRIPTOR.message_types_by_name["ClassificationRequest"] = _CLASSIFICATIONREQUEST -DESCRIPTOR.message_types_by_name["ClassificationResponse"] = _CLASSIFICATIONRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Class = _reflection.GeneratedProtocolMessageType( - "Class", - (_message.Message,), - dict( - DESCRIPTOR=_CLASS, - __module__="tensorflow_serving.apis.classification_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.Class) - ), -) -_sym_db.RegisterMessage(Class) - -Classifications = _reflection.GeneratedProtocolMessageType( - "Classifications", - (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONS, - __module__="tensorflow_serving.apis.classification_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.Classifications) - ), -) -_sym_db.RegisterMessage(Classifications) - -ClassificationResult = _reflection.GeneratedProtocolMessageType( - "ClassificationResult", - (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONRESULT, - __module__="tensorflow_serving.apis.classification_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationResult) - ), -) -_sym_db.RegisterMessage(ClassificationResult) - -ClassificationRequest = _reflection.GeneratedProtocolMessageType( - "ClassificationRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONREQUEST, - __module__="tensorflow_serving.apis.classification_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationRequest) - ), -) -_sym_db.RegisterMessage(ClassificationRequest) - 
-ClassificationResponse = _reflection.GeneratedProtocolMessageType( - "ClassificationResponse", - (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONRESPONSE, - __module__="tensorflow_serving.apis.classification_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.ClassificationResponse) - ), -) -_sym_db.RegisterMessage(ClassificationResponse) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) -# @@protoc_insertion_point(module_scope) diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/get_model_metadata_pb2.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/get_model_metadata_pb2.py deleted file mode 100755 index 07186710fe..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/get_model_metadata_pb2.py +++ /dev/null @@ -1,391 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: tensorflow_serving/apis/get_model_metadata.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 -from tensorflow.core.protobuf import ( - meta_graph_pb2 as tensorflow_dot_core_dot_protobuf_dot_meta__graph__pb2, -) -from tensorflow_serving.apis import model_pb2 as tensorflow__serving_dot_apis_dot_model__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="tensorflow_serving/apis/get_model_metadata.proto", - package="tensorflow.serving", - syntax="proto3", - serialized_pb=_b( - '\n0tensorflow_serving/apis/get_model_metadata.proto\x12\x12tensorflow.serving\x1a\x19google/protobuf/any.proto\x1a)tensorflow/core/protobuf/meta_graph.proto\x1a#tensorflow_serving/apis/model.proto"\xae\x01\n\x0fSignatureDefMap\x12L\n\rsignature_def\x18\x01 \x03(\x0b\x32\x35.tensorflow.serving.SignatureDefMap.SignatureDefEntry\x1aM\n\x11SignatureDefEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.tensorflow.SignatureDef:\x02\x38\x01"d\n\x17GetModelMetadataRequest\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12\x16\n\x0emetadata_field\x18\x02 \x03(\t"\xe2\x01\n\x18GetModelMetadataResponse\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12L\n\x08metadata\x18\x02 \x03(\x0b\x32:.tensorflow.serving.GetModelMetadataResponse.MetadataEntry\x1a\x45\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any:\x02\x38\x01\x42\x03\xf8\x01\x01\x62\x06proto3' - ), - dependencies=[ - google_dot_protobuf_dot_any__pb2.DESCRIPTOR, - tensorflow_dot_core_dot_protobuf_dot_meta__graph__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_model__pb2.DESCRIPTOR, - ], -) - - -_SIGNATUREDEFMAP_SIGNATUREDEFENTRY = _descriptor.Descriptor( - name="SignatureDefEntry", - full_name="tensorflow.serving.SignatureDefMap.SignatureDefEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="tensorflow.serving.SignatureDefMap.SignatureDefEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="tensorflow.serving.SignatureDefMap.SignatureDefEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=277, - serialized_end=354, -) - -_SIGNATUREDEFMAP = _descriptor.Descriptor( - name="SignatureDefMap", - full_name="tensorflow.serving.SignatureDefMap", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="signature_def", - full_name="tensorflow.serving.SignatureDefMap.signature_def", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[_SIGNATUREDEFMAP_SIGNATUREDEFENTRY], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=180, - serialized_end=354, -) - - -_GETMODELMETADATAREQUEST = _descriptor.Descriptor( - name="GetModelMetadataRequest", - full_name="tensorflow.serving.GetModelMetadataRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.GetModelMetadataRequest.model_spec", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="metadata_field", - full_name="tensorflow.serving.GetModelMetadataRequest.metadata_field", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=356, - serialized_end=456, -) - - -_GETMODELMETADATARESPONSE_METADATAENTRY = _descriptor.Descriptor( - name="MetadataEntry", - full_name="tensorflow.serving.GetModelMetadataResponse.MetadataEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="tensorflow.serving.GetModelMetadataResponse.MetadataEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - 
full_name="tensorflow.serving.GetModelMetadataResponse.MetadataEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=616, - serialized_end=685, -) - -_GETMODELMETADATARESPONSE = _descriptor.Descriptor( - name="GetModelMetadataResponse", - full_name="tensorflow.serving.GetModelMetadataResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.GetModelMetadataResponse.model_spec", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="metadata", - full_name="tensorflow.serving.GetModelMetadataResponse.metadata", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_GETMODELMETADATARESPONSE_METADATAENTRY], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=459, - serialized_end=685, -) - -_SIGNATUREDEFMAP_SIGNATUREDEFENTRY.fields_by_name[ - "value" -].message_type = tensorflow_dot_core_dot_protobuf_dot_meta__graph__pb2._SIGNATUREDEF -_SIGNATUREDEFMAP_SIGNATUREDEFENTRY.containing_type = _SIGNATUREDEFMAP -_SIGNATUREDEFMAP.fields_by_name["signature_def"].message_type = _SIGNATUREDEFMAP_SIGNATUREDEFENTRY -_GETMODELMETADATAREQUEST.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_GETMODELMETADATARESPONSE_METADATAENTRY.fields_by_name[ - "value" -].message_type = google_dot_protobuf_dot_any__pb2._ANY -_GETMODELMETADATARESPONSE_METADATAENTRY.containing_type = _GETMODELMETADATARESPONSE -_GETMODELMETADATARESPONSE.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_GETMODELMETADATARESPONSE.fields_by_name[ - "metadata" -].message_type = _GETMODELMETADATARESPONSE_METADATAENTRY -DESCRIPTOR.message_types_by_name["SignatureDefMap"] = _SIGNATUREDEFMAP -DESCRIPTOR.message_types_by_name["GetModelMetadataRequest"] = _GETMODELMETADATAREQUEST -DESCRIPTOR.message_types_by_name["GetModelMetadataResponse"] = _GETMODELMETADATARESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -SignatureDefMap = _reflection.GeneratedProtocolMessageType( - "SignatureDefMap", - (_message.Message,), - dict( - SignatureDefEntry=_reflection.GeneratedProtocolMessageType( - "SignatureDefEntry", - (_message.Message,), - dict( - DESCRIPTOR=_SIGNATUREDEFMAP_SIGNATUREDEFENTRY, - __module__="tensorflow_serving.apis.get_model_metadata_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.SignatureDefMap.SignatureDefEntry) - ), - ), - DESCRIPTOR=_SIGNATUREDEFMAP, - __module__="tensorflow_serving.apis.get_model_metadata_pb2" 
- # @@protoc_insertion_point(class_scope:tensorflow.serving.SignatureDefMap) - ), -) -_sym_db.RegisterMessage(SignatureDefMap) -_sym_db.RegisterMessage(SignatureDefMap.SignatureDefEntry) - -GetModelMetadataRequest = _reflection.GeneratedProtocolMessageType( - "GetModelMetadataRequest", - (_message.Message,), - dict( - DESCRIPTOR=_GETMODELMETADATAREQUEST, - __module__="tensorflow_serving.apis.get_model_metadata_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.GetModelMetadataRequest) - ), -) -_sym_db.RegisterMessage(GetModelMetadataRequest) - -GetModelMetadataResponse = _reflection.GeneratedProtocolMessageType( - "GetModelMetadataResponse", - (_message.Message,), - dict( - MetadataEntry=_reflection.GeneratedProtocolMessageType( - "MetadataEntry", - (_message.Message,), - dict( - DESCRIPTOR=_GETMODELMETADATARESPONSE_METADATAENTRY, - __module__="tensorflow_serving.apis.get_model_metadata_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.GetModelMetadataResponse.MetadataEntry) - ), - ), - DESCRIPTOR=_GETMODELMETADATARESPONSE, - __module__="tensorflow_serving.apis.get_model_metadata_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.GetModelMetadataResponse) - ), -) -_sym_db.RegisterMessage(GetModelMetadataResponse) -_sym_db.RegisterMessage(GetModelMetadataResponse.MetadataEntry) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) -_SIGNATUREDEFMAP_SIGNATUREDEFENTRY.has_options = True -_SIGNATUREDEFMAP_SIGNATUREDEFENTRY._options = _descriptor._ParseOptions( - descriptor_pb2.MessageOptions(), _b("8\001") -) -_GETMODELMETADATARESPONSE_METADATAENTRY.has_options = True -_GETMODELMETADATARESPONSE_METADATAENTRY._options = _descriptor._ParseOptions( - descriptor_pb2.MessageOptions(), _b("8\001") -) -# @@protoc_insertion_point(module_scope) diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/inference_pb2.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/inference_pb2.py deleted file mode 100755 index 3db862fe9b..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/inference_pb2.py +++ /dev/null @@ -1,361 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: tensorflow_serving/apis/inference.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from tensorflow_serving.apis import ( - classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2, -) -from tensorflow_serving.apis import input_pb2 as tensorflow__serving_dot_apis_dot_input__pb2 -from tensorflow_serving.apis import model_pb2 as tensorflow__serving_dot_apis_dot_model__pb2 -from tensorflow_serving.apis import ( - regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="tensorflow_serving/apis/inference.proto", - package="tensorflow.serving", - syntax="proto3", - serialized_pb=_b( - '\n\'tensorflow_serving/apis/inference.proto\x12\x12tensorflow.serving\x1a,tensorflow_serving/apis/classification.proto\x1a#tensorflow_serving/apis/input.proto\x1a#tensorflow_serving/apis/model.proto\x1a(tensorflow_serving/apis/regression.proto"W\n\rInferenceTask\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12\x13\n\x0bmethod_name\x18\x02 \x01(\t"\xdc\x01\n\x0fInferenceResult\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12I\n\x15\x63lassification_result\x18\x02 \x01(\x0b\x32(.tensorflow.serving.ClassificationResultH\x00\x12\x41\n\x11regression_result\x18\x03 \x01(\x0b\x32$.tensorflow.serving.RegressionResultH\x00\x42\x08\n\x06result"s\n\x15MultiInferenceRequest\x12\x30\n\x05tasks\x18\x01 \x03(\x0b\x32!.tensorflow.serving.InferenceTask\x12(\n\x05input\x18\x02 \x01(\x0b\x32\x19.tensorflow.serving.Input"N\n\x16MultiInferenceResponse\x12\x34\n\x07results\x18\x01 \x03(\x0b\x32#.tensorflow.serving.InferenceResultB\x03\xf8\x01\x01\x62\x06proto3' - ), - dependencies=[ - tensorflow__serving_dot_apis_dot_classification__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_input__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_model__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_regression__pb2.DESCRIPTOR, - ], -) - - -_INFERENCETASK = _descriptor.Descriptor( - name="InferenceTask", - full_name="tensorflow.serving.InferenceTask", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.InferenceTask.model_spec", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="method_name", - full_name="tensorflow.serving.InferenceTask.method_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=225, - serialized_end=312, -) - - -_INFERENCERESULT = 
_descriptor.Descriptor( - name="InferenceResult", - full_name="tensorflow.serving.InferenceResult", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.InferenceResult.model_spec", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="classification_result", - full_name="tensorflow.serving.InferenceResult.classification_result", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="regression_result", - full_name="tensorflow.serving.InferenceResult.regression_result", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="result", - full_name="tensorflow.serving.InferenceResult.result", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=315, - serialized_end=535, -) - - -_MULTIINFERENCEREQUEST = _descriptor.Descriptor( - name="MultiInferenceRequest", - full_name="tensorflow.serving.MultiInferenceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="tasks", - full_name="tensorflow.serving.MultiInferenceRequest.tasks", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="input", - full_name="tensorflow.serving.MultiInferenceRequest.input", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=537, - serialized_end=652, -) - - -_MULTIINFERENCERESPONSE = _descriptor.Descriptor( - name="MultiInferenceResponse", - full_name="tensorflow.serving.MultiInferenceResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="results", - full_name="tensorflow.serving.MultiInferenceResponse.results", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - 
oneofs=[], - serialized_start=654, - serialized_end=732, -) - -_INFERENCETASK.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_INFERENCERESULT.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_INFERENCERESULT.fields_by_name[ - "classification_result" -].message_type = tensorflow__serving_dot_apis_dot_classification__pb2._CLASSIFICATIONRESULT -_INFERENCERESULT.fields_by_name[ - "regression_result" -].message_type = tensorflow__serving_dot_apis_dot_regression__pb2._REGRESSIONRESULT -_INFERENCERESULT.oneofs_by_name["result"].fields.append( - _INFERENCERESULT.fields_by_name["classification_result"] -) -_INFERENCERESULT.fields_by_name[ - "classification_result" -].containing_oneof = _INFERENCERESULT.oneofs_by_name["result"] -_INFERENCERESULT.oneofs_by_name["result"].fields.append( - _INFERENCERESULT.fields_by_name["regression_result"] -) -_INFERENCERESULT.fields_by_name[ - "regression_result" -].containing_oneof = _INFERENCERESULT.oneofs_by_name["result"] -_MULTIINFERENCEREQUEST.fields_by_name["tasks"].message_type = _INFERENCETASK -_MULTIINFERENCEREQUEST.fields_by_name[ - "input" -].message_type = tensorflow__serving_dot_apis_dot_input__pb2._INPUT -_MULTIINFERENCERESPONSE.fields_by_name["results"].message_type = _INFERENCERESULT -DESCRIPTOR.message_types_by_name["InferenceTask"] = _INFERENCETASK -DESCRIPTOR.message_types_by_name["InferenceResult"] = _INFERENCERESULT -DESCRIPTOR.message_types_by_name["MultiInferenceRequest"] = _MULTIINFERENCEREQUEST -DESCRIPTOR.message_types_by_name["MultiInferenceResponse"] = _MULTIINFERENCERESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -InferenceTask = _reflection.GeneratedProtocolMessageType( - "InferenceTask", - (_message.Message,), - dict( - DESCRIPTOR=_INFERENCETASK, - __module__="tensorflow_serving.apis.inference_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.InferenceTask) - ), -) -_sym_db.RegisterMessage(InferenceTask) - -InferenceResult = _reflection.GeneratedProtocolMessageType( - "InferenceResult", - (_message.Message,), - dict( - DESCRIPTOR=_INFERENCERESULT, - __module__="tensorflow_serving.apis.inference_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.InferenceResult) - ), -) -_sym_db.RegisterMessage(InferenceResult) - -MultiInferenceRequest = _reflection.GeneratedProtocolMessageType( - "MultiInferenceRequest", - (_message.Message,), - dict( - DESCRIPTOR=_MULTIINFERENCEREQUEST, - __module__="tensorflow_serving.apis.inference_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.MultiInferenceRequest) - ), -) -_sym_db.RegisterMessage(MultiInferenceRequest) - -MultiInferenceResponse = _reflection.GeneratedProtocolMessageType( - "MultiInferenceResponse", - (_message.Message,), - dict( - DESCRIPTOR=_MULTIINFERENCERESPONSE, - __module__="tensorflow_serving.apis.inference_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.MultiInferenceResponse) - ), -) -_sym_db.RegisterMessage(MultiInferenceResponse) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) -# @@protoc_insertion_point(module_scope) diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/input_pb2.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/input_pb2.py deleted file mode 100755 index f8df1ab2a5..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/input_pb2.py +++ /dev/null @@ -1,256 +0,0 @@ -# Generated 
by the protocol buffer compiler. DO NOT EDIT! -# source: tensorflow_serving/apis/input.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from tensorflow.core.example import example_pb2 as tensorflow_dot_core_dot_example_dot_example__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="tensorflow_serving/apis/input.proto", - package="tensorflow.serving", - syntax="proto3", - serialized_pb=_b( - '\n#tensorflow_serving/apis/input.proto\x12\x12tensorflow.serving\x1a%tensorflow/core/example/example.proto"4\n\x0b\x45xampleList\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example"e\n\x16\x45xampleListWithContext\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.tensorflow.Example"\xa1\x01\n\x05Input\x12;\n\x0c\x65xample_list\x18\x01 \x01(\x0b\x32\x1f.tensorflow.serving.ExampleListB\x02(\x01H\x00\x12S\n\x19\x65xample_list_with_context\x18\x02 \x01(\x0b\x32*.tensorflow.serving.ExampleListWithContextB\x02(\x01H\x00\x42\x06\n\x04kindB\x03\xf8\x01\x01\x62\x06proto3' - ), - dependencies=[tensorflow_dot_core_dot_example_dot_example__pb2.DESCRIPTOR], -) - - -_EXAMPLELIST = _descriptor.Descriptor( - name="ExampleList", - full_name="tensorflow.serving.ExampleList", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="examples", - full_name="tensorflow.serving.ExampleList.examples", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=98, - serialized_end=150, -) - - -_EXAMPLELISTWITHCONTEXT = _descriptor.Descriptor( - name="ExampleListWithContext", - full_name="tensorflow.serving.ExampleListWithContext", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="examples", - full_name="tensorflow.serving.ExampleListWithContext.examples", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="context", - full_name="tensorflow.serving.ExampleListWithContext.context", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=152, - serialized_end=253, -) - - -_INPUT = _descriptor.Descriptor( - name="Input", - full_name="tensorflow.serving.Input", - 
filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="example_list", - full_name="tensorflow.serving.Input.example_list", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("(\001")), - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="example_list_with_context", - full_name="tensorflow.serving.Input.example_list_with_context", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("(\001")), - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="kind", - full_name="tensorflow.serving.Input.kind", - index=0, - containing_type=None, - fields=[], - ) - ], - serialized_start=256, - serialized_end=417, -) - -_EXAMPLELIST.fields_by_name[ - "examples" -].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE -_EXAMPLELISTWITHCONTEXT.fields_by_name[ - "examples" -].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE -_EXAMPLELISTWITHCONTEXT.fields_by_name[ - "context" -].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE -_INPUT.fields_by_name["example_list"].message_type = _EXAMPLELIST -_INPUT.fields_by_name["example_list_with_context"].message_type = _EXAMPLELISTWITHCONTEXT -_INPUT.oneofs_by_name["kind"].fields.append(_INPUT.fields_by_name["example_list"]) -_INPUT.fields_by_name["example_list"].containing_oneof = _INPUT.oneofs_by_name["kind"] -_INPUT.oneofs_by_name["kind"].fields.append(_INPUT.fields_by_name["example_list_with_context"]) -_INPUT.fields_by_name["example_list_with_context"].containing_oneof = _INPUT.oneofs_by_name["kind"] -DESCRIPTOR.message_types_by_name["ExampleList"] = _EXAMPLELIST -DESCRIPTOR.message_types_by_name["ExampleListWithContext"] = _EXAMPLELISTWITHCONTEXT -DESCRIPTOR.message_types_by_name["Input"] = _INPUT -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ExampleList = _reflection.GeneratedProtocolMessageType( - "ExampleList", - (_message.Message,), - dict( - DESCRIPTOR=_EXAMPLELIST, - __module__="tensorflow_serving.apis.input_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleList) - ), -) -_sym_db.RegisterMessage(ExampleList) - -ExampleListWithContext = _reflection.GeneratedProtocolMessageType( - "ExampleListWithContext", - (_message.Message,), - dict( - DESCRIPTOR=_EXAMPLELISTWITHCONTEXT, - __module__="tensorflow_serving.apis.input_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleListWithContext) - ), -) -_sym_db.RegisterMessage(ExampleListWithContext) - -Input = _reflection.GeneratedProtocolMessageType( - "Input", - (_message.Message,), - dict( - DESCRIPTOR=_INPUT, - __module__="tensorflow_serving.apis.input_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.Input) - ), -) -_sym_db.RegisterMessage(Input) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) 
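
For reference, the tensorflow.serving.Input message defined above is a oneof: exactly one of example_list or example_list_with_context may be set, and both carry tf.train.Example protos. A minimal construction sketch against the upstream tensorflow-serving-api package (assumed installed alongside TensorFlow; the feature name "x" is a placeholder):

    import tensorflow as tf
    from tensorflow_serving.apis import input_pb2

    # Wrap a feature dict in a tf.train.Example; "x" is a placeholder name.
    example = tf.train.Example(
        features=tf.train.Features(
            feature={"x": tf.train.Feature(float_list=tf.train.FloatList(value=[1.0]))}
        )
    )

    # Populate the "example_list" arm of the oneof; assigning to
    # example_list_with_context instead would clear example_list.
    tfs_input = input_pb2.Input()
    tfs_input.example_list.examples.extend([example])
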
-_INPUT.fields_by_name["example_list"].has_options = True -_INPUT.fields_by_name["example_list"]._options = _descriptor._ParseOptions( - descriptor_pb2.FieldOptions(), _b("(\001") -) -_INPUT.fields_by_name["example_list_with_context"].has_options = True -_INPUT.fields_by_name["example_list_with_context"]._options = _descriptor._ParseOptions( - descriptor_pb2.FieldOptions(), _b("(\001") -) -# @@protoc_insertion_point(module_scope) diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/model_pb2.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/model_pb2.py deleted file mode 100755 index 5e07a816b7..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/model_pb2.py +++ /dev/null @@ -1,126 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: tensorflow_serving/apis/model.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="tensorflow_serving/apis/model.proto", - package="tensorflow.serving", - syntax="proto3", - serialized_pb=_b( - '\n#tensorflow_serving/apis/model.proto\x12\x12tensorflow.serving\x1a\x1egoogle/protobuf/wrappers.proto"_\n\tModelSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12,\n\x07version\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x16\n\x0esignature_name\x18\x03 \x01(\tB\x03\xf8\x01\x01\x62\x06proto3' - ), - dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR], -) - - -_MODELSPEC = _descriptor.Descriptor( - name="ModelSpec", - full_name="tensorflow.serving.ModelSpec", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="tensorflow.serving.ModelSpec.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="version", - full_name="tensorflow.serving.ModelSpec.version", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="signature_name", - full_name="tensorflow.serving.ModelSpec.signature_name", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=91, - serialized_end=186, -) - -_MODELSPEC.fields_by_name[ - "version" -].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE 
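
For reference, ModelSpec identifies a servable by name, optionally pins a version through a google.protobuf.Int64Value wrapper (unset means the latest available version), and can select a signature. A short sketch against the upstream tensorflow-serving-api package; "my_model" and the explicit version are placeholders:

    from tensorflow_serving.apis import model_pb2

    # "my_model" is a placeholder servable name.
    spec = model_pb2.ModelSpec(name="my_model", signature_name="serving_default")
    # version is an Int64Value wrapper, so leaving it unset means
    # "latest version"; setting .value pins one explicitly.
    spec.version.value = 1
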
-DESCRIPTOR.message_types_by_name["ModelSpec"] = _MODELSPEC -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ModelSpec = _reflection.GeneratedProtocolMessageType( - "ModelSpec", - (_message.Message,), - dict( - DESCRIPTOR=_MODELSPEC, - __module__="tensorflow_serving.apis.model_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.ModelSpec) - ), -) -_sym_db.RegisterMessage(ModelSpec) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) -# @@protoc_insertion_point(module_scope) diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2.py deleted file mode 100755 index 512c9952ac..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2.py +++ /dev/null @@ -1,221 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: tensorflow_serving/apis/model_service.proto -# To regenerate run -# python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/model_service.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from tensorflow_serving.apis import ( - get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="tensorflow_serving/apis/model_service.proto", - package="tensorflow.serving", - syntax="proto3", - serialized_pb=_b( - "\n+tensorflow_serving/apis/model_service.proto\x12\x12tensorflow.serving\x1a.tensorflow_serving/apis/get_model_status.proto2w\n\x0cModelService\x12g\n\x0eGetModelStatus\x12).tensorflow.serving.GetModelStatusRequest\x1a*.tensorflow.serving.GetModelStatusResponseB\x03\xf8\x01\x01\x62\x06proto3" - ), - dependencies=[tensorflow__serving_dot_apis_dot_get__model__status__pb2.DESCRIPTOR], -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. 
- import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - - class ModelServiceStub(object): - """ModelService provides access to information about model versions - that have been handled by the model server. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.GetModelStatus = channel.unary_unary( - "/tensorflow.serving.ModelService/GetModelStatus", - request_serializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.SerializeToString, - response_deserializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.FromString, - ) - - class ModelServiceServicer(object): - """ModelService provides access to information about model versions - that have been handled by the model server. - """ - - def GetModelStatus(self, request, context): - """Gets status of model. If the ModelSpec in the request does not specify - version, information about all versions of the model will be returned. If - the ModelSpec in the request does specify a version, the status of only - that version will be returned. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def add_ModelServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "GetModelStatus": grpc.unary_unary_rpc_method_handler( - servicer.GetModelStatus, - request_deserializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.FromString, - response_serializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.SerializeToString, - ) - } - generic_handler = grpc.method_handlers_generic_handler( - "tensorflow.serving.ModelService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - class BetaModelServiceServicer(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - - """ModelService provides access to information about model versions - that have been handled by the model server. - """ - - def GetModelStatus(self, request, context): - """Gets status of model. If the ModelSpec in the request does not specify - version, information about all versions of the model will be returned. If - the ModelSpec in the request does specify a version, the status of only - that version will be returned. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - class BetaModelServiceStub(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - - """ModelService provides access to information about model versions - that have been handled by the model server. - """ - - def GetModelStatus( - self, request, timeout, metadata=None, with_call=False, protocol_options=None - ): - """Gets status of model. If the ModelSpec in the request does not specify - version, information about all versions of the model will be returned. 
If - the ModelSpec in the request does specify a version, the status of only - that version will be returned. - """ - raise NotImplementedError() - - GetModelStatus.future = None - - def beta_create_ModelService_server( - servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None - ): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_deserializers = { - ( - "tensorflow.serving.ModelService", - "GetModelStatus", - ): tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.FromString - } - response_serializers = { - ( - "tensorflow.serving.ModelService", - "GetModelStatus", - ): tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.SerializeToString - } - method_implementations = { - ( - "tensorflow.serving.ModelService", - "GetModelStatus", - ): face_utilities.unary_unary_inline(servicer.GetModelStatus) - } - server_options = beta_implementations.server_options( - request_deserializers=request_deserializers, - response_serializers=response_serializers, - thread_pool=pool, - thread_pool_size=pool_size, - default_timeout=default_timeout, - maximum_timeout=maximum_timeout, - ) - return beta_implementations.server(method_implementations, options=server_options) - - def beta_create_ModelService_stub( - channel, host=None, metadata_transformer=None, pool=None, pool_size=None - ): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_serializers = { - ( - "tensorflow.serving.ModelService", - "GetModelStatus", - ): tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.SerializeToString - } - response_deserializers = { - ( - "tensorflow.serving.ModelService", - "GetModelStatus", - ): tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.FromString - } - cardinalities = {"GetModelStatus": cardinality.Cardinality.UNARY_UNARY} - stub_options = beta_implementations.stub_options( - host=host, - metadata_transformer=metadata_transformer, - request_serializers=request_serializers, - response_deserializers=response_deserializers, - thread_pool=pool, - thread_pool_size=pool_size, - ) - return beta_implementations.dynamic_stub( - channel, "tensorflow.serving.ModelService", cardinalities, options=stub_options - ) - - -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2_grpc.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2_grpc.py deleted file mode 100755 index 17a83e6be1..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/model_service_pb2_grpc.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: tensorflow_serving/apis/model_service.proto -# To regenerate run -# python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/model_service.proto - -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -import tensorflow_serving.apis.get_model_status_pb2 as tensorflow__serving_dot_apis_dot_get__model__status__pb2 - - -class ModelServiceStub(object): - """ModelService provides access to information about model versions - that have been handled by the model server. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.GetModelStatus = channel.unary_unary( - "/tensorflow.serving.ModelService/GetModelStatus", - request_serializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.SerializeToString, - response_deserializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.FromString, - ) - - -class ModelServiceServicer(object): - """ModelService provides access to information about model versions - that have been handled by the model server. - """ - - def GetModelStatus(self, request, context): - """Gets status of model. If the ModelSpec in the request does not specify - version, information about all versions of the model will be returned. If - the ModelSpec in the request does specify a version, the status of only - that version will be returned. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_ModelServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "GetModelStatus": grpc.unary_unary_rpc_method_handler( - servicer.GetModelStatus, - request_deserializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusRequest.FromString, - response_serializer=tensorflow__serving_dot_apis_dot_get__model__status__pb2.GetModelStatusResponse.SerializeToString, - ) - } - generic_handler = grpc.method_handlers_generic_handler( - "tensorflow.serving.ModelService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/predict_pb2.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/predict_pb2.py deleted file mode 100755 index 5b0d8d9137..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/predict_pb2.py +++ /dev/null @@ -1,354 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: tensorflow_serving/apis/predict.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from tensorflow.core.framework import ( - tensor_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__pb2, -) -from tensorflow_serving.apis import model_pb2 as tensorflow__serving_dot_apis_dot_model__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="tensorflow_serving/apis/predict.proto", - package="tensorflow.serving", - syntax="proto3", - serialized_pb=_b( - '\n%tensorflow_serving/apis/predict.proto\x12\x12tensorflow.serving\x1a&tensorflow/core/framework/tensor.proto\x1a#tensorflow_serving/apis/model.proto"\xe2\x01\n\x0ePredictRequest\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12>\n\x06inputs\x18\x02 \x03(\x0b\x32..tensorflow.serving.PredictRequest.InputsEntry\x12\x15\n\routput_filter\x18\x03 \x03(\t\x1a\x46\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.tensorflow.TensorProto:\x02\x38\x01"\xd0\x01\n\x0fPredictResponse\x12\x31\n\nmodel_spec\x18\x02 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12\x41\n\x07outputs\x18\x01 \x03(\x0b\x32\x30.tensorflow.serving.PredictResponse.OutputsEntry\x1aG\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.tensorflow.TensorProto:\x02\x38\x01\x42\x03\xf8\x01\x01\x62\x06proto3' - ), - dependencies=[ - tensorflow_dot_core_dot_framework_dot_tensor__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_model__pb2.DESCRIPTOR, - ], -) - - -_PREDICTREQUEST_INPUTSENTRY = _descriptor.Descriptor( - name="InputsEntry", - full_name="tensorflow.serving.PredictRequest.InputsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="tensorflow.serving.PredictRequest.InputsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="tensorflow.serving.PredictRequest.InputsEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=295, - serialized_end=365, -) - -_PREDICTREQUEST = _descriptor.Descriptor( - name="PredictRequest", - full_name="tensorflow.serving.PredictRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.PredictRequest.model_spec", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - 
default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="inputs", - full_name="tensorflow.serving.PredictRequest.inputs", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="output_filter", - full_name="tensorflow.serving.PredictRequest.output_filter", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_PREDICTREQUEST_INPUTSENTRY], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=139, - serialized_end=365, -) - - -_PREDICTRESPONSE_OUTPUTSENTRY = _descriptor.Descriptor( - name="OutputsEntry", - full_name="tensorflow.serving.PredictResponse.OutputsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="tensorflow.serving.PredictResponse.OutputsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="tensorflow.serving.PredictResponse.OutputsEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=505, - serialized_end=576, -) - -_PREDICTRESPONSE = _descriptor.Descriptor( - name="PredictResponse", - full_name="tensorflow.serving.PredictResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.PredictResponse.model_spec", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="outputs", - full_name="tensorflow.serving.PredictResponse.outputs", - index=1, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[_PREDICTRESPONSE_OUTPUTSENTRY], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=368, - serialized_end=576, -) - 
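
For reference, PredictRequest and PredictResponse carry tensors as map<string, TensorProto> fields (inputs and outputs, respectively). A minimal gRPC client sketch against the upstream tensorflow-serving-api package, assuming a TensorFlow Serving endpoint at localhost:8500; the model name, input key, and payload are placeholders:

    import grpc
    import tensorflow as tf
    from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

    # Placeholder endpoint; a production deployment would use a secure channel.
    channel = grpc.insecure_channel("localhost:8500")
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = "my_model"  # placeholder model name
    # inputs is a map<string, TensorProto>; message-valued map entries
    # cannot be assigned directly, hence CopyFrom.
    request.inputs["inputs"].CopyFrom(
        tf.make_tensor_proto([[1.0, 2.0]], dtype=tf.float32)
    )

    response = stub.Predict(request, timeout=10.0)
    outputs = response.outputs  # also a map<string, TensorProto>
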
-_PREDICTREQUEST_INPUTSENTRY.fields_by_name[ - "value" -].message_type = tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO -_PREDICTREQUEST_INPUTSENTRY.containing_type = _PREDICTREQUEST -_PREDICTREQUEST.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_PREDICTREQUEST.fields_by_name["inputs"].message_type = _PREDICTREQUEST_INPUTSENTRY -_PREDICTRESPONSE_OUTPUTSENTRY.fields_by_name[ - "value" -].message_type = tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO -_PREDICTRESPONSE_OUTPUTSENTRY.containing_type = _PREDICTRESPONSE -_PREDICTRESPONSE.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_PREDICTRESPONSE.fields_by_name["outputs"].message_type = _PREDICTRESPONSE_OUTPUTSENTRY -DESCRIPTOR.message_types_by_name["PredictRequest"] = _PREDICTREQUEST -DESCRIPTOR.message_types_by_name["PredictResponse"] = _PREDICTRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -PredictRequest = _reflection.GeneratedProtocolMessageType( - "PredictRequest", - (_message.Message,), - dict( - InputsEntry=_reflection.GeneratedProtocolMessageType( - "InputsEntry", - (_message.Message,), - dict( - DESCRIPTOR=_PREDICTREQUEST_INPUTSENTRY, - __module__="tensorflow_serving.apis.predict_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.PredictRequest.InputsEntry) - ), - ), - DESCRIPTOR=_PREDICTREQUEST, - __module__="tensorflow_serving.apis.predict_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.PredictRequest) - ), -) -_sym_db.RegisterMessage(PredictRequest) -_sym_db.RegisterMessage(PredictRequest.InputsEntry) - -PredictResponse = _reflection.GeneratedProtocolMessageType( - "PredictResponse", - (_message.Message,), - dict( - OutputsEntry=_reflection.GeneratedProtocolMessageType( - "OutputsEntry", - (_message.Message,), - dict( - DESCRIPTOR=_PREDICTRESPONSE_OUTPUTSENTRY, - __module__="tensorflow_serving.apis.predict_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.PredictResponse.OutputsEntry) - ), - ), - DESCRIPTOR=_PREDICTRESPONSE, - __module__="tensorflow_serving.apis.predict_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.PredictResponse) - ), -) -_sym_db.RegisterMessage(PredictResponse) -_sym_db.RegisterMessage(PredictResponse.OutputsEntry) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) -_PREDICTREQUEST_INPUTSENTRY.has_options = True -_PREDICTREQUEST_INPUTSENTRY._options = _descriptor._ParseOptions( - descriptor_pb2.MessageOptions(), _b("8\001") -) -_PREDICTRESPONSE_OUTPUTSENTRY.has_options = True -_PREDICTRESPONSE_OUTPUTSENTRY._options = _descriptor._ParseOptions( - descriptor_pb2.MessageOptions(), _b("8\001") -) -# @@protoc_insertion_point(module_scope) diff --git a/src/sagemaker/tensorflow/tensorflow_serving/apis/prediction_service_pb2.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/prediction_service_pb2.py deleted file mode 100755 index bc2924f17b..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/prediction_service_pb2.py +++ /dev/null @@ -1,431 +0,0 @@ -# Copyright 2016 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: tensorflow_serving/apis/prediction_service.proto -# To regenerate run -# python -m grpc.tools.protoc --python_out=. --grpc_python_out=. -I. tensorflow_serving/apis/prediction_service.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from tensorflow_serving.apis import ( - classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2, -) -from tensorflow_serving.apis import ( - get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2, -) -from tensorflow_serving.apis import inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2 -from tensorflow_serving.apis import predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2 -from tensorflow_serving.apis import ( - regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="tensorflow_serving/apis/prediction_service.proto", - package="tensorflow.serving", - syntax="proto3", - serialized_pb=_b( - "\n0tensorflow_serving/apis/prediction_service.proto\x12\x12tensorflow.serving\x1a,tensorflow_serving/apis/classification.proto\x1a\x30tensorflow_serving/apis/get_model_metadata.proto\x1a'tensorflow_serving/apis/inference.proto\x1a%tensorflow_serving/apis/predict.proto\x1a(tensorflow_serving/apis/regression.proto2\xfc\x03\n\x11PredictionService\x12\x61\n\x08\x43lassify\x12).tensorflow.serving.ClassificationRequest\x1a*.tensorflow.serving.ClassificationResponse\x12X\n\x07Regress\x12%.tensorflow.serving.RegressionRequest\x1a&.tensorflow.serving.RegressionResponse\x12R\n\x07Predict\x12\".tensorflow.serving.PredictRequest\x1a#.tensorflow.serving.PredictResponse\x12g\n\x0eMultiInference\x12).tensorflow.serving.MultiInferenceRequest\x1a*.tensorflow.serving.MultiInferenceResponse\x12m\n\x10GetModelMetadata\x12+.tensorflow.serving.GetModelMetadataRequest\x1a,.tensorflow.serving.GetModelMetadataResponseB\x03\xf8\x01\x01\x62\x06proto3" - ), - dependencies=[ - tensorflow__serving_dot_apis_dot_classification__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_inference__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_predict__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_regression__pb2.DESCRIPTOR, - ], -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. 
- import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - - class PredictionServiceStub(object): - """open source marker; do not remove - PredictionService provides access to machine-learned models loaded by - model_servers. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.Classify = channel.unary_unary( - "/tensorflow.serving.PredictionService/Classify", - request_serializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.SerializeToString, - response_deserializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.FromString, - ) - self.Regress = channel.unary_unary( - "/tensorflow.serving.PredictionService/Regress", - request_serializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.SerializeToString, - response_deserializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.FromString, - ) - self.Predict = channel.unary_unary( - "/tensorflow.serving.PredictionService/Predict", - request_serializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.SerializeToString, - response_deserializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.FromString, - ) - self.MultiInference = channel.unary_unary( - "/tensorflow.serving.PredictionService/MultiInference", - request_serializer=tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceRequest.SerializeToString, - response_deserializer=tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceResponse.FromString, - ) - self.GetModelMetadata = channel.unary_unary( - "/tensorflow.serving.PredictionService/GetModelMetadata", - request_serializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.SerializeToString, - response_deserializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.FromString, - ) - - class PredictionServiceServicer(object): - """open source marker; do not remove - PredictionService provides access to machine-learned models loaded by - model_servers. - """ - - def Classify(self, request, context): - """Classify. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Regress(self, request, context): - """Regress. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def Predict(self, request, context): - """Predict -- provides access to loaded TensorFlow model. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def MultiInference(self, request, context): - """MultiInference API for multi-headed models. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetModelMetadata(self, request, context): - """GetModelMetadata - provides access to metadata for loaded models. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def add_PredictionServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "Classify": grpc.unary_unary_rpc_method_handler( - servicer.Classify, - request_deserializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.FromString, - response_serializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.SerializeToString, - ), - "Regress": grpc.unary_unary_rpc_method_handler( - servicer.Regress, - request_deserializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.FromString, - response_serializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.SerializeToString, - ), - "Predict": grpc.unary_unary_rpc_method_handler( - servicer.Predict, - request_deserializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.FromString, - response_serializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.SerializeToString, - ), - "MultiInference": grpc.unary_unary_rpc_method_handler( - servicer.MultiInference, - request_deserializer=tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceRequest.FromString, - response_serializer=tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceResponse.SerializeToString, - ), - "GetModelMetadata": grpc.unary_unary_rpc_method_handler( - servicer.GetModelMetadata, - request_deserializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.FromString, - response_serializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "tensorflow.serving.PredictionService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - class BetaPredictionServiceServicer(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - - """open source marker; do not remove - PredictionService provides access to machine-learned models loaded by - model_servers. - """ - - def Classify(self, request, context): - """Classify. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - def Regress(self, request, context): - """Regress. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - def Predict(self, request, context): - """Predict -- provides access to loaded TensorFlow model. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - def MultiInference(self, request, context): - """MultiInference API for multi-headed models. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - def GetModelMetadata(self, request, context): - """GetModelMetadata - provides access to metadata for loaded models. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - class BetaPredictionServiceStub(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. 
This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - - """open source marker; do not remove - PredictionService provides access to machine-learned models loaded by - model_servers. - """ - - def Classify(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Classify. - """ - raise NotImplementedError() - - Classify.future = None - - def Regress(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Regress. - """ - raise NotImplementedError() - - Regress.future = None - - def Predict(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Predict -- provides access to loaded TensorFlow model. - """ - raise NotImplementedError() - - Predict.future = None - - def MultiInference( - self, request, timeout, metadata=None, with_call=False, protocol_options=None - ): - """MultiInference API for multi-headed models. - """ - raise NotImplementedError() - - MultiInference.future = None - - def GetModelMetadata( - self, request, timeout, metadata=None, with_call=False, protocol_options=None - ): - """GetModelMetadata - provides access to metadata for loaded models. - """ - raise NotImplementedError() - - GetModelMetadata.future = None - - def beta_create_PredictionService_server( - servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None - ): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_deserializers = { - ( - "tensorflow.serving.PredictionService", - "Classify", - ): tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.FromString, - ( - "tensorflow.serving.PredictionService", - "GetModelMetadata", - ): tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.FromString, - ( - "tensorflow.serving.PredictionService", - "MultiInference", - ): tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceRequest.FromString, - ( - "tensorflow.serving.PredictionService", - "Predict", - ): tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.FromString, - ( - "tensorflow.serving.PredictionService", - "Regress", - ): tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.FromString, - } - response_serializers = { - ( - "tensorflow.serving.PredictionService", - "Classify", - ): tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.SerializeToString, - ( - "tensorflow.serving.PredictionService", - "GetModelMetadata", - ): tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.SerializeToString, - ( - "tensorflow.serving.PredictionService", - "MultiInference", - ): tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceResponse.SerializeToString, - ( - "tensorflow.serving.PredictionService", - "Predict", - ): tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.SerializeToString, - ( - "tensorflow.serving.PredictionService", - "Regress", - ): tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.SerializeToString, - } - method_implementations = { - ("tensorflow.serving.PredictionService", "Classify"): face_utilities.unary_unary_inline( - servicer.Classify - ), - ( - "tensorflow.serving.PredictionService", - "GetModelMetadata", - ): 
face_utilities.unary_unary_inline(servicer.GetModelMetadata), - ( - "tensorflow.serving.PredictionService", - "MultiInference", - ): face_utilities.unary_unary_inline(servicer.MultiInference), - ("tensorflow.serving.PredictionService", "Predict"): face_utilities.unary_unary_inline( - servicer.Predict - ), - ("tensorflow.serving.PredictionService", "Regress"): face_utilities.unary_unary_inline( - servicer.Regress - ), - } - server_options = beta_implementations.server_options( - request_deserializers=request_deserializers, - response_serializers=response_serializers, - thread_pool=pool, - thread_pool_size=pool_size, - default_timeout=default_timeout, - maximum_timeout=maximum_timeout, - ) - return beta_implementations.server(method_implementations, options=server_options) - - def beta_create_PredictionService_stub( - channel, host=None, metadata_transformer=None, pool=None, pool_size=None - ): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_serializers = { - ( - "tensorflow.serving.PredictionService", - "Classify", - ): tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.SerializeToString, - ( - "tensorflow.serving.PredictionService", - "GetModelMetadata", - ): tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.SerializeToString, - ( - "tensorflow.serving.PredictionService", - "MultiInference", - ): tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceRequest.SerializeToString, - ( - "tensorflow.serving.PredictionService", - "Predict", - ): tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.SerializeToString, - ( - "tensorflow.serving.PredictionService", - "Regress", - ): tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.SerializeToString, - } - response_deserializers = { - ( - "tensorflow.serving.PredictionService", - "Classify", - ): tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.FromString, - ( - "tensorflow.serving.PredictionService", - "GetModelMetadata", - ): tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.FromString, - ( - "tensorflow.serving.PredictionService", - "MultiInference", - ): tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceResponse.FromString, - ( - "tensorflow.serving.PredictionService", - "Predict", - ): tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.FromString, - ( - "tensorflow.serving.PredictionService", - "Regress", - ): tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.FromString, - } - cardinalities = { - "Classify": cardinality.Cardinality.UNARY_UNARY, - "GetModelMetadata": cardinality.Cardinality.UNARY_UNARY, - "MultiInference": cardinality.Cardinality.UNARY_UNARY, - "Predict": cardinality.Cardinality.UNARY_UNARY, - "Regress": cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options( - host=host, - metadata_transformer=metadata_transformer, - request_serializers=request_serializers, - response_deserializers=response_deserializers, - thread_pool=pool, - thread_pool_size=pool_size, - ) - return beta_implementations.dynamic_stub( - channel, "tensorflow.serving.PredictionService", cardinalities, options=stub_options - ) - - -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git 
a/src/sagemaker/tensorflow/tensorflow_serving/apis/regression_pb2.py b/src/sagemaker/tensorflow/tensorflow_serving/apis/regression_pb2.py deleted file mode 100755 index d3c92f9539..0000000000 --- a/src/sagemaker/tensorflow/tensorflow_serving/apis/regression_pb2.py +++ /dev/null @@ -1,291 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: tensorflow_serving/apis/regression.proto - -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from tensorflow_serving.apis import input_pb2 as tensorflow__serving_dot_apis_dot_input__pb2 -from tensorflow_serving.apis import model_pb2 as tensorflow__serving_dot_apis_dot_model__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="tensorflow_serving/apis/regression.proto", - package="tensorflow.serving", - syntax="proto3", - serialized_pb=_b( - '\n(tensorflow_serving/apis/regression.proto\x12\x12tensorflow.serving\x1a#tensorflow_serving/apis/input.proto\x1a#tensorflow_serving/apis/model.proto"\x1b\n\nRegression\x12\r\n\x05value\x18\x01 \x01(\x02"G\n\x10RegressionResult\x12\x33\n\x0bregressions\x18\x01 \x03(\x0b\x32\x1e.tensorflow.serving.Regression"p\n\x11RegressionRequest\x12\x31\n\nmodel_spec\x18\x01 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12(\n\x05input\x18\x02 \x01(\x0b\x32\x19.tensorflow.serving.Input"}\n\x12RegressionResponse\x12\x31\n\nmodel_spec\x18\x02 \x01(\x0b\x32\x1d.tensorflow.serving.ModelSpec\x12\x34\n\x06result\x18\x01 \x01(\x0b\x32$.tensorflow.serving.RegressionResultB\x03\xf8\x01\x01\x62\x06proto3' - ), - dependencies=[ - tensorflow__serving_dot_apis_dot_input__pb2.DESCRIPTOR, - tensorflow__serving_dot_apis_dot_model__pb2.DESCRIPTOR, - ], -) - - -_REGRESSION = _descriptor.Descriptor( - name="Regression", - full_name="tensorflow.serving.Regression", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="value", - full_name="tensorflow.serving.Regression.value", - index=0, - number=1, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=138, - serialized_end=165, -) - - -_REGRESSIONRESULT = _descriptor.Descriptor( - name="RegressionResult", - full_name="tensorflow.serving.RegressionResult", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="regressions", - full_name="tensorflow.serving.RegressionResult.regressions", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=167, - 
serialized_end=238, -) - - -_REGRESSIONREQUEST = _descriptor.Descriptor( - name="RegressionRequest", - full_name="tensorflow.serving.RegressionRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.RegressionRequest.model_spec", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="input", - full_name="tensorflow.serving.RegressionRequest.input", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=240, - serialized_end=352, -) - - -_REGRESSIONRESPONSE = _descriptor.Descriptor( - name="RegressionResponse", - full_name="tensorflow.serving.RegressionResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model_spec", - full_name="tensorflow.serving.RegressionResponse.model_spec", - index=0, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="result", - full_name="tensorflow.serving.RegressionResponse.result", - index=1, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=354, - serialized_end=479, -) - -_REGRESSIONRESULT.fields_by_name["regressions"].message_type = _REGRESSION -_REGRESSIONREQUEST.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_REGRESSIONREQUEST.fields_by_name[ - "input" -].message_type = tensorflow__serving_dot_apis_dot_input__pb2._INPUT -_REGRESSIONRESPONSE.fields_by_name[ - "model_spec" -].message_type = tensorflow__serving_dot_apis_dot_model__pb2._MODELSPEC -_REGRESSIONRESPONSE.fields_by_name["result"].message_type = _REGRESSIONRESULT -DESCRIPTOR.message_types_by_name["Regression"] = _REGRESSION -DESCRIPTOR.message_types_by_name["RegressionResult"] = _REGRESSIONRESULT -DESCRIPTOR.message_types_by_name["RegressionRequest"] = _REGRESSIONREQUEST -DESCRIPTOR.message_types_by_name["RegressionResponse"] = _REGRESSIONRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Regression = _reflection.GeneratedProtocolMessageType( - "Regression", - (_message.Message,), - dict( - DESCRIPTOR=_REGRESSION, - __module__="tensorflow_serving.apis.regression_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.Regression) - ), -) -_sym_db.RegisterMessage(Regression) - -RegressionResult = _reflection.GeneratedProtocolMessageType( - "RegressionResult", - 
(_message.Message,), - dict( - DESCRIPTOR=_REGRESSIONRESULT, - __module__="tensorflow_serving.apis.regression_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.RegressionResult) - ), -) -_sym_db.RegisterMessage(RegressionResult) - -RegressionRequest = _reflection.GeneratedProtocolMessageType( - "RegressionRequest", - (_message.Message,), - dict( - DESCRIPTOR=_REGRESSIONREQUEST, - __module__="tensorflow_serving.apis.regression_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.RegressionRequest) - ), -) -_sym_db.RegisterMessage(RegressionRequest) - -RegressionResponse = _reflection.GeneratedProtocolMessageType( - "RegressionResponse", - (_message.Message,), - dict( - DESCRIPTOR=_REGRESSIONRESPONSE, - __module__="tensorflow_serving.apis.regression_pb2" - # @@protoc_insertion_point(class_scope:tensorflow.serving.RegressionResponse) - ), -) -_sym_db.RegisterMessage(RegressionResponse) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) -# @@protoc_insertion_point(module_scope) diff --git a/tests/unit/test_tf_estimator.py b/tests/unit/test_tf_estimator.py index 84caff2421..d19b4855ff 100644 --- a/tests/unit/test_tf_estimator.py +++ b/tests/unit/test_tf_estimator.py @@ -19,7 +19,7 @@ import pytest from mock import patch, Mock, MagicMock -from sagemaker.tensorflow import defaults, serving, TensorFlow, TensorFlowModel, TensorFlowPredictor +from sagemaker.tensorflow import defaults, serving, TensorFlow DATA_DIR = os.path.join(os.path.dirname(__file__), "..", "data") SCRIPT_FILE = "dummy_script.py" @@ -220,13 +220,12 @@ def test_create_model(sagemaker_session, tf_version): model = tf.create_model() assert model.sagemaker_session == sagemaker_session - assert model.framework_version == tf_version - assert model.py_version == tf.py_version - assert model.entry_point == SCRIPT_PATH + assert model._framework_version == tf_version + assert model.entry_point is None assert model.role == ROLE assert model.name == job_name - assert model.container_log_level == container_log_level - assert model.source_dir == source_dir + assert model._container_log_level == container_log_level + assert model.source_dir is None assert model.vpc_config is None assert model.enable_network_isolation() @@ -291,24 +290,6 @@ def test_create_model_with_custom_image(sagemaker_session): assert model.image == custom_image -@patch("sagemaker.utils.create_tar_file", MagicMock()) -def test_model(sagemaker_session, tf_version): - model = TensorFlowModel( - MODEL_DATA, role=ROLE, entry_point=SCRIPT_PATH, sagemaker_session=sagemaker_session - ) - predictor = model.deploy(1, INSTANCE_TYPE) - assert isinstance(predictor, TensorFlowPredictor) - - -@patch("sagemaker.fw_utils.tar_and_upload_dir", MagicMock()) -def test_model_image_accelerator(sagemaker_session): - model = TensorFlowModel( - MODEL_DATA, role=ROLE, entry_point=SCRIPT_PATH, sagemaker_session=sagemaker_session - ) - container_def = model.prepare_container_def(INSTANCE_TYPE, accelerator_type=ACCELERATOR_TYPE) - assert container_def["Image"] == _get_full_cpu_image_uri_with_ei(defaults.TF_VERSION) - - @patch("sagemaker.utils.create_tar_file", MagicMock()) def test_train_image_default(sagemaker_session): tf = TensorFlow( @@ -543,16 +524,6 @@ def test_estimator_py2_deprecation_warning(warning, sagemaker_session): assert estimator.py_version == "py2" warning.assert_called_with("tensorflow", "2.1.0") - model = TensorFlowModel( - MODEL_DATA, - role=ROLE, - entry_point=SCRIPT_PATH, 
- sagemaker_session=sagemaker_session, - py_version="py2", - ) - assert model.py_version == "py2" - warning.assert_called_with(model.__framework_name__, defaults.LATEST_PY2_VERSION) - @patch("sagemaker.fw_utils.empty_framework_version_warning") def test_empty_framework_version(warning, sagemaker_session): @@ -568,17 +539,6 @@ def test_empty_framework_version(warning, sagemaker_session): assert estimator.framework_version == defaults.TF_VERSION warning.assert_called_with(defaults.TF_VERSION, estimator.LATEST_VERSION) - model = TensorFlowModel( - MODEL_DATA, - role=ROLE, - entry_point=SCRIPT_PATH, - sagemaker_session=sagemaker_session, - framework_version=None, - ) - - assert model.framework_version == defaults.TF_VERSION - warning.assert_called_with(defaults.TF_VERSION, defaults.LATEST_VERSION) - def test_py2_version_deprecated(sagemaker_session): with pytest.raises(AttributeError) as e: diff --git a/tests/unit/test_tf_predictor.py b/tests/unit/test_tf_predictor.py deleted file mode 100644 index c313ed5fe8..0000000000 --- a/tests/unit/test_tf_predictor.py +++ /dev/null @@ -1,415 +0,0 @@ -# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. -from __future__ import absolute_import - -import io -import json -import sys - -from google.protobuf import json_format -import numpy as np -import pytest -from mock import Mock -import tensorflow as tf -import six -from six import BytesIO -from tensorflow.python.saved_model.signature_constants import ( - DEFAULT_SERVING_SIGNATURE_DEF_KEY, - PREDICT_INPUTS, -) - -from sagemaker.predictor import RealTimePredictor -from sagemaker.tensorflow.predictor import ( - tf_csv_serializer, - tf_deserializer, - tf_json_deserializer, - tf_json_serializer, - tf_serializer, -) -from sagemaker.tensorflow.tensorflow_serving.apis import classification_pb2 - -BUCKET_NAME = "mybucket" -ENDPOINT = "myendpoint" -REGION = "us-west-2" - -CLASSIFICATION_RESPONSE = { - "result": { - "classifications": [ - { - "classes": [ - {"label": "0", "score": 0.0012890376383438706}, - {"label": "1", "score": 0.9814321994781494}, - {"label": "2", "score": 0.017278732731938362}, - ] - } - ] - } -} - -CSV_CONTENT_TYPE = "text/csv" -JSON_CONTENT_TYPE = "application/json" -PROTO_CONTENT_TYPE = "application/octet-stream" - -ENDPOINT_DESC = {"EndpointConfigName": ENDPOINT} - -ENDPOINT_CONFIG_DESC = {"ProductionVariants": [{"ModelName": "model-1"}, {"ModelName": "model-2"}]} - - -@pytest.fixture() -def sagemaker_session(): - boto_mock = Mock(name="boto_session", region_name=REGION) - ims = Mock(name="sagemaker_session", boto_session=boto_mock) - ims.default_bucket = Mock(name="default_bucket", return_value=BUCKET_NAME) - ims.sagemaker_client.describe_endpoint = Mock(return_value=ENDPOINT_DESC) - ims.sagemaker_client.describe_endpoint_config = Mock(return_value=ENDPOINT_CONFIG_DESC) - return ims - - -def test_endpoint_initialization(sagemaker_session): - endpoint_name = "endpoint" - predictor = RealTimePredictor(endpoint=endpoint_name, sagemaker_session=sagemaker_session) - assert 
predictor.endpoint == endpoint_name - - -def test_classification_request_json(sagemaker_session): - data = [1, 2, 3] - predictor = RealTimePredictor( - endpoint=ENDPOINT, - sagemaker_session=sagemaker_session, - deserializer=tf_json_deserializer, - serializer=tf_json_serializer, - ) - - mock_response( - json.dumps(CLASSIFICATION_RESPONSE).encode("utf-8"), sagemaker_session, JSON_CONTENT_TYPE - ) - - result = predictor.predict(data) - - sagemaker_session.sagemaker_runtime_client.invoke_endpoint.assert_called_once_with( - Accept=JSON_CONTENT_TYPE, - Body="[1, 2, 3]", - ContentType=JSON_CONTENT_TYPE, - EndpointName="myendpoint", - ) - - assert result == CLASSIFICATION_RESPONSE - - -def test_classification_request_csv(sagemaker_session): - data = [1, 2, 3] - predictor = RealTimePredictor( - serializer=tf_csv_serializer, - deserializer=tf_deserializer, - sagemaker_session=sagemaker_session, - endpoint=ENDPOINT, - ) - - expected_response = json_format.Parse( - json.dumps(CLASSIFICATION_RESPONSE), classification_pb2.ClassificationResponse() - ).SerializeToString() - - mock_response(expected_response, sagemaker_session, PROTO_CONTENT_TYPE) - - result = predictor.predict(data) - - sagemaker_session.sagemaker_runtime_client.invoke_endpoint.assert_called_once_with( - Accept=PROTO_CONTENT_TYPE, - Body="1,2,3", - ContentType=CSV_CONTENT_TYPE, - EndpointName="myendpoint", - ) - - # python 2 and 3 protobuf serialization has different precision so I'm checking - # the version here - if sys.version_info < (3, 0): - assert ( - str(result) - == """result { - classifications { - classes { - label: "0" - score: 0.00128903763834 - } - classes { - label: "1" - score: 0.981432199478 - } - classes { - label: "2" - score: 0.0172787327319 - } - } -} -""" - ) - else: - assert ( - str(result) - == """result { - classifications { - classes { - label: "0" - score: 0.0012890376383438706 - } - classes { - label: "1" - score: 0.9814321994781494 - } - classes { - label: "2" - score: 0.017278732731938362 - } - } -} -""" - ) - - -def test_json_deserializer_should_work_with_predict_response(): - data = b"""{ -"outputs": { - "example_strings": { - "dtype": "DT_STRING", - "tensorShape": { - "dim": [ - { - "size": "3" - } - ] - }, - "stringVal": [ - "YXBwbGU=", - "YmFuYW5h", - "b3Jhbmdl" - ] - }, - "ages": { - "dtype": "DT_FLOAT", - "floatVal": [ - 4.954165935516357 - ], - "tensorShape": { - "dim": [ - { - "size": "1" - } - ] - } - } - }, - "modelSpec": { - "version": "1531758457", - "name": "generic_model", - "signatureName": "serving_default" - } -}""" - - stream = BytesIO(data) - - response = tf_json_deserializer(stream, "application/json") - - if six.PY2: - string_vals = ["apple", "banana", "orange"] - else: - string_vals = [b"apple", b"banana", b"orange"] - - assert response == { - "model_spec": { - "name": u"generic_model", - "signature_name": u"serving_default", - "version": {"value": 1531758457.0 if six.PY2 else 1531758457}, - }, - "outputs": { - u"ages": { - "dtype": 1, - "float_val": [4.954165935516357], - "tensor_shape": {"dim": [{"size": 1.0 if six.PY2 else 1}]}, - }, - u"example_strings": { - "dtype": 7, - "string_val": string_vals, - "tensor_shape": {"dim": [{"size": 3.0 if six.PY2 else 3}]}, - }, - }, - } - - -def test_classification_request_pb(sagemaker_session): - request = classification_pb2.ClassificationRequest() - request.model_spec.name = "generic_model" - request.model_spec.signature_name = DEFAULT_SERVING_SIGNATURE_DEF_KEY - example = request.input.example_list.examples.add() - 
example.features.feature[PREDICT_INPUTS].float_list.value.extend([6.4, 3.2, 4.5, 1.5]) - - predictor = RealTimePredictor( - sagemaker_session=sagemaker_session, - endpoint=ENDPOINT, - deserializer=tf_deserializer, - serializer=tf_serializer, - ) - - expected_response = classification_pb2.ClassificationResponse() - classes = expected_response.result.classifications.add().classes - - class_0 = classes.add() - class_0.label = "0" - class_0.score = 0.00128903763834 - - class_1 = classes.add() - class_1.label = "1" - class_1.score = 0.981432199478 - - class_2 = classes.add() - class_2.label = "2" - class_2.score = 0.0172787327319 - - mock_response(expected_response.SerializeToString(), sagemaker_session, PROTO_CONTENT_TYPE) - - result = predictor.predict(request) - - sagemaker_session.sagemaker_runtime_client.invoke_endpoint.assert_called_once_with( - Accept=PROTO_CONTENT_TYPE, - Body=request.SerializeToString(), - ContentType=PROTO_CONTENT_TYPE, - EndpointName="myendpoint", - ) - - # python 2 and 3 protobuf serialization has different precision so I'm checking - # the version here - if sys.version_info < (3, 0): - assert ( - str(result) - == """result { - classifications { - classes { - label: "0" - score: 0.00128903763834 - } - classes { - label: "1" - score: 0.981432199478 - } - classes { - label: "2" - score: 0.0172787327319 - } - } -} -""" - ) - else: - assert ( - str(result) - == """result { - classifications { - classes { - label: "0" - score: 0.0012890376383438706 - } - classes { - label: "1" - score: 0.9814321994781494 - } - classes { - label: "2" - score: 0.017278732731938362 - } - } -} -""" - ) - - -def test_predict_request_json(sagemaker_session): - data = [6.4, 3.2, 0.5, 1.5] - tensor_proto = tf.make_tensor_proto( - values=np.asarray(data), shape=[1, len(data)], dtype=tf.float32 - ) - predictor = RealTimePredictor( - sagemaker_session=sagemaker_session, - endpoint=ENDPOINT, - deserializer=tf_json_deserializer, - serializer=tf_json_serializer, - ) - - mock_response( - json.dumps(CLASSIFICATION_RESPONSE).encode("utf-8"), sagemaker_session, JSON_CONTENT_TYPE - ) - - result = predictor.predict(tensor_proto) - - sagemaker_session.sagemaker_runtime_client.invoke_endpoint.assert_called_once_with( - Accept=JSON_CONTENT_TYPE, - Body=json_format.MessageToJson(tensor_proto), - ContentType=JSON_CONTENT_TYPE, - EndpointName="myendpoint", - ) - - assert result == CLASSIFICATION_RESPONSE - - -def test_predict_tensor_request_csv(sagemaker_session): - data = [6.4, 3.2, 0.5, 1.5] - tensor_proto = tf.make_tensor_proto( - values=np.asarray(data), shape=[1, len(data)], dtype=tf.float32 - ) - predictor = RealTimePredictor( - serializer=tf_csv_serializer, - deserializer=tf_json_deserializer, - sagemaker_session=sagemaker_session, - endpoint=ENDPOINT, - ) - - mock_response( - json.dumps(CLASSIFICATION_RESPONSE).encode("utf-8"), sagemaker_session, JSON_CONTENT_TYPE - ) - - result = predictor.predict(tensor_proto) - - sagemaker_session.sagemaker_runtime_client.invoke_endpoint.assert_called_once_with( - Accept=JSON_CONTENT_TYPE, - Body="6.4,3.2,0.5,1.5", - ContentType=CSV_CONTENT_TYPE, - EndpointName="myendpoint", - ) - - assert result == CLASSIFICATION_RESPONSE - - -def mock_response(expected_response, sagemaker_session, content_type): - sagemaker_session.sagemaker_runtime_client.invoke_endpoint.return_value = { - "ContentType": content_type, - "Body": io.BytesIO(expected_response), - } - - -def test_json_serialize_dict(): - data = {"tensor1": [1, 2, 3], "tensor2": [4, 5, 6]} - serialized = 
tf_json_serializer(data) - # deserialize again for assertion, since dict order is not guaranteed - deserialized = json.loads(serialized) - assert deserialized == data - - -def test_json_serialize_dict_with_numpy(): - data = {"tensor1": np.asarray([1, 2, 3]), "tensor2": np.asarray([4, 5, 6])} - serialized = tf_json_serializer(data) - # deserialize again for assertion, since dict order is not guaranteed - deserialized = json.loads(serialized) - assert deserialized == {"tensor1": [1, 2, 3], "tensor2": [4, 5, 6]} - - -def test_json_serialize_numpy(): - data = np.asarray([[1, 2, 3], [4, 5, 6]]) - assert tf_json_serializer(data) == "[[1, 2, 3], [4, 5, 6]]" diff --git a/tests/unit/test_tfs.py b/tests/unit/test_tfs.py index bb8da27ef8..8f72ad536a 100644 --- a/tests/unit/test_tfs.py +++ b/tests/unit/test_tfs.py @@ -263,9 +263,7 @@ def test_estimator_deploy(sagemaker_session): job_name = "doing something" tf.fit(inputs="s3://mybucket/train", job_name=job_name) - predictor = tf.deploy( - INSTANCE_COUNT, INSTANCE_TYPE, endpoint_name="endpoint", endpoint_type="tensorflow-serving" - ) + predictor = tf.deploy(INSTANCE_COUNT, INSTANCE_TYPE, endpoint_name="endpoint") assert isinstance(predictor, Predictor) From 60aaf9defdaa8517f856d9fcbde60c953d99a30d Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Thu, 28 May 2020 12:14:40 -0700 Subject: [PATCH 2/7] fix imports --- tests/integ/test_tfs.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/integ/test_tfs.py b/tests/integ/test_tfs.py index 4d070fac85..19038d14da 100644 --- a/tests/integ/test_tfs.py +++ b/tests/integ/test_tfs.py @@ -13,11 +13,11 @@ from __future__ import absolute_import import tarfile - -import botocore.exceptions import os +import botocore.exceptions import pytest + import sagemaker import sagemaker.predictor import sagemaker.utils @@ -104,7 +104,6 @@ def tfs_predictor_with_model_and_entry_point_and_dependencies( predictor = model.deploy(1, "local", endpoint_name=endpoint_name) try: - yield predictor finally: predictor.delete_endpoint() From c592622c69bdfdd665d0636261fdd9471980ce39 Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Thu, 28 May 2020 12:26:21 -0700 Subject: [PATCH 3/7] update sphinx docs --- doc/sagemaker.tensorflow.rst | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/doc/sagemaker.tensorflow.rst b/doc/sagemaker.tensorflow.rst index 5107afcfc2..cc08890edc 100644 --- a/doc/sagemaker.tensorflow.rst +++ b/doc/sagemaker.tensorflow.rst @@ -10,22 +10,6 @@ TensorFlow Estimator :undoc-members: :show-inheritance: -TensorFlow Model ----------------- - -.. autoclass:: sagemaker.tensorflow.model.TensorFlowModel - :members: - :undoc-members: - :show-inheritance: - -TensorFlow Predictor --------------------- - -.. 
autoclass:: sagemaker.tensorflow.model.TensorFlowPredictor - :members: - :undoc-members: - :show-inheritance: - TensorFlow Serving Model ------------------------ From 64f7095d3271cf141fe52333848be65d0eec7868 Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Thu, 28 May 2020 14:28:51 -0700 Subject: [PATCH 4/7] use general csv serializer in tfs unit tests --- tests/unit/test_tfs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_tfs.py b/tests/unit/test_tfs.py index 8f72ad536a..173ab89874 100644 --- a/tests/unit/test_tfs.py +++ b/tests/unit/test_tfs.py @@ -19,8 +19,8 @@ import mock import pytest from mock import Mock +from sagemaker.predictor import csv_serializer from sagemaker.tensorflow import TensorFlow -from sagemaker.tensorflow.predictor import csv_serializer from sagemaker.tensorflow.serving import Model, Predictor JSON_CONTENT_TYPE = "application/json" From ed158e0dac31c4a36130f6fa3f8e198bcb71fff3 Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Thu, 28 May 2020 14:42:15 -0700 Subject: [PATCH 5/7] don't assume create_model takes model_server_workers for airflow --- src/sagemaker/workflow/airflow.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/sagemaker/workflow/airflow.py b/src/sagemaker/workflow/airflow.py index 6d663bad94..a671b8625b 100644 --- a/src/sagemaker/workflow/airflow.py +++ b/src/sagemaker/workflow/airflow.py @@ -647,12 +647,14 @@ def model_config_from_estimator( elif isinstance(estimator, sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): model = estimator.create_model(vpc_config_override=vpc_config_override) elif isinstance(estimator, sagemaker.estimator.Framework): - model = estimator.create_model( - model_server_workers=model_server_workers, - role=role, - vpc_config_override=vpc_config_override, - entry_point=estimator.entry_point, - ) + model_kwargs = { + "role": role, + "vpc_config_override": vpc_config_override, + "entry_point": estimator.entry_point, + } + if model_server_workers: + model_kwargs["model_server_workers"] = model_server_workers + model = estimator.create_model(**model_kwargs) else: raise TypeError( "Estimator must be one of sagemaker.estimator.Estimator, sagemaker.estimator.Framework" From ce6f566c07258a99bc918b940f7a79fa46f125f4 Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Thu, 28 May 2020 15:46:40 -0700 Subject: [PATCH 6/7] add back transformer() because TFS doesn't accept model_server_workers --- src/sagemaker/tensorflow/estimator.py | 124 ++++++++++++++++++++++++++ src/sagemaker/workflow/airflow.py | 21 +++-- tests/integ/test_tfs.py | 5 +- tests/unit/test_tf_estimator.py | 109 ++++++++++++++++++++++ 4 files changed, 249 insertions(+), 10 deletions(-) diff --git a/src/sagemaker/tensorflow/estimator.py b/src/sagemaker/tensorflow/estimator.py index f14a3d2d9f..71b4138875 100644 --- a/src/sagemaker/tensorflow/estimator.py +++ b/src/sagemaker/tensorflow/estimator.py @@ -24,6 +24,7 @@ import sagemaker.fw_utils as fw from sagemaker.tensorflow import defaults from sagemaker.tensorflow.serving import Model +from sagemaker.transformer import Transformer from sagemaker.vpc_utils import VPC_CONFIG_DEFAULT logger = logging.getLogger("sagemaker") @@ -384,3 +385,126 @@ def train_image(self): ) return super(TensorFlow, self).train_image() + + def transformer( + self, + instance_count, + instance_type, + strategy=None, + assemble_with=None, + 
output_path=None,
+        output_kms_key=None,
+        accept=None,
+        env=None,
+        max_concurrent_transforms=None,
+        max_payload=None,
+        tags=None,
+        role=None,
+        volume_kms_key=None,
+        entry_point=None,
+        vpc_config_override=VPC_CONFIG_DEFAULT,
+        enable_network_isolation=None,
+        model_name=None,
+    ):
+        """Return a ``Transformer`` that uses a SageMaker Model based on the training job.
+        It reuses the SageMaker Session and base job name used by the Estimator.
+
+        Args:
+            instance_count (int): Number of EC2 instances to use.
+            instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
+            strategy (str): The strategy used to decide how to batch records in a single request
+                (default: None). Valid values: 'MultiRecord' and 'SingleRecord'.
+            assemble_with (str): How the output is assembled (default: None). Valid values: 'Line'
+                or 'None'.
+            output_path (str): S3 location for saving the transform result. If not specified,
+                results are stored to a default bucket.
+            output_kms_key (str): Optional. KMS key ID for encrypting the transform output
+                (default: None).
+            accept (str): The accept header passed by the client to
+                the inference endpoint. If it is supported by the endpoint,
+                it will be the format of the batch transform output.
+            env (dict): Environment variables to be set for use during the transform job
+                (default: None).
+            max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
+                each individual transform container at one time.
+            max_payload (int): Maximum size of the payload in a single HTTP request to the
+                container in MB.
+            tags (list[dict]): List of tags for labeling a transform job. If none specified, then
+                the tags used for the training job are used for the transform job.
+            role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also
+                used during transform jobs. If not specified, the role from the Estimator will be
+                used.
+            volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML
+                compute instance (default: None).
+            entry_point (str): Path (absolute or relative) to the local Python source file which
+                should be executed as the entry point for inference (default: None). If not
+                specified, no entry point is used, and the model relies on the TensorFlow
+                Serving container's default request handling.
+            vpc_config_override (dict[str, list[str]]): Optional override for
+                the VpcConfig set on the model.
+                Default: use subnets and security groups from this Estimator.
+
+                * 'Subnets' (list[str]): List of subnet ids.
+                * 'SecurityGroupIds' (list[str]): List of security group ids.
+
+            enable_network_isolation (bool): Specifies whether the container will
+                run in network isolation mode. Network isolation mode restricts
+                the container access to outside networks (such as the internet).
+                The container does not make any inbound or outbound network
+                calls. If True, a channel named "code" will be created for any
+                user entry script for inference. Also known as Internet-free mode.
+                If not specified, this setting is taken from the estimator's
+                current configuration.
+            model_name (str): Name to use for creating an Amazon SageMaker
+                model. If not specified, the name of the training job is used.
+        """
+        role = role or self.role
+
+        if self.latest_training_job is None:
+            logging.warning(
+                "No finished training job found associated with this estimator. 
Please make sure " + "this estimator is only used for building workflow config" + ) + return Transformer( + model_name or self._current_job_name, + instance_count, + instance_type, + strategy=strategy, + assemble_with=assemble_with, + output_path=output_path, + output_kms_key=output_kms_key, + accept=accept, + max_concurrent_transforms=max_concurrent_transforms, + max_payload=max_payload, + env=env or {}, + tags=tags, + base_transform_job_name=self.base_job_name, + volume_kms_key=volume_kms_key, + sagemaker_session=self.sagemaker_session, + ) + + if enable_network_isolation is None: + enable_network_isolation = self.enable_network_isolation() + + model = self.create_model( + role=role, + vpc_config_override=vpc_config_override, + entry_point=entry_point, + enable_network_isolation=enable_network_isolation, + name=model_name, + ) + + return model.transformer( + instance_count, + instance_type, + strategy=strategy, + assemble_with=assemble_with, + output_path=output_path, + output_kms_key=output_kms_key, + accept=accept, + env=env, + max_concurrent_transforms=max_concurrent_transforms, + max_payload=max_payload, + tags=tags, + volume_kms_key=volume_kms_key, + ) diff --git a/src/sagemaker/workflow/airflow.py b/src/sagemaker/workflow/airflow.py index a671b8625b..8ccb15e277 100644 --- a/src/sagemaker/workflow/airflow.py +++ b/src/sagemaker/workflow/airflow.py @@ -19,6 +19,7 @@ import sagemaker from sagemaker import fw_utils, job, utils, session, vpc_utils from sagemaker.amazon import amazon_estimator +from sagemaker.tensorflow import TensorFlow def prepare_framework(estimator, s3_operations): @@ -646,15 +647,19 @@ def model_config_from_estimator( ) elif isinstance(estimator, sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): model = estimator.create_model(vpc_config_override=vpc_config_override) + elif isinstance(estimator, TensorFlow): + model = estimator.create_model( + role=role, + vpc_config_override=vpc_config_override, + entry_point=estimator.entry_point, + ) elif isinstance(estimator, sagemaker.estimator.Framework): - model_kwargs = { - "role": role, - "vpc_config_override": vpc_config_override, - "entry_point": estimator.entry_point, - } - if model_server_workers: - model_kwargs["model_server_workers"] = model_server_workers - model = estimator.create_model(**model_kwargs) + model = estimator.create_model( + model_server_workers=model_server_workers, + role=role, + vpc_config_override=vpc_config_override, + entry_point=estimator.entry_point, + ) else: raise TypeError( "Estimator must be one of sagemaker.estimator.Estimator, sagemaker.estimator.Framework" diff --git a/tests/integ/test_tfs.py b/tests/integ/test_tfs.py index 19038d14da..4d070fac85 100644 --- a/tests/integ/test_tfs.py +++ b/tests/integ/test_tfs.py @@ -13,11 +13,11 @@ from __future__ import absolute_import import tarfile -import os import botocore.exceptions -import pytest +import os +import pytest import sagemaker import sagemaker.predictor import sagemaker.utils @@ -104,6 +104,7 @@ def tfs_predictor_with_model_and_entry_point_and_dependencies( predictor = model.deploy(1, "local", endpoint_name=endpoint_name) try: + yield predictor finally: predictor.delete_endpoint() diff --git a/tests/unit/test_tf_estimator.py b/tests/unit/test_tf_estimator.py index d19b4855ff..82c5533a0d 100644 --- a/tests/unit/test_tf_estimator.py +++ b/tests/unit/test_tf_estimator.py @@ -19,6 +19,7 @@ import pytest from mock import patch, Mock, MagicMock +from sagemaker.estimator import _TrainingJob from sagemaker.tensorflow import 
defaults, serving, TensorFlow DATA_DIR = os.path.join(os.path.dirname(__file__), "..", "data") @@ -477,6 +478,114 @@ def test_attach_wrong_framework(sagemaker_session): assert "didn't use image for requested framework" in str(error) +@patch("sagemaker.tensorflow.estimator.TensorFlow.create_model") +def test_transformer_creation_with_optional_args(create_model, sagemaker_session): + model = Mock() + create_model.return_value = model + + tf = TensorFlow( + entry_point=SCRIPT_PATH, + role=ROLE, + sagemaker_session=sagemaker_session, + train_instance_count=INSTANCE_COUNT, + train_instance_type=INSTANCE_TYPE, + ) + tf.latest_training_job = _TrainingJob(sagemaker_session, "some-job-name") + + strategy = "SingleRecord" + assemble_with = "Line" + output_path = "s3://{}/batch-output".format(BUCKET_NAME) + kms_key = "kms" + accept_type = "text/bytes" + env = {"foo": "bar"} + max_concurrent_transforms = 3 + max_payload = 100 + tags = {"Key": "foo", "Value": "bar"} + new_role = "role" + vpc_config = {"Subnets": ["1234"], "SecurityGroupIds": ["5678"]} + model_name = "model-name" + + tf.transformer( + INSTANCE_COUNT, + INSTANCE_TYPE, + strategy=strategy, + assemble_with=assemble_with, + output_path=output_path, + output_kms_key=kms_key, + accept=accept_type, + env=env, + max_concurrent_transforms=max_concurrent_transforms, + max_payload=max_payload, + tags=tags, + role=new_role, + volume_kms_key=kms_key, + entry_point=SERVING_SCRIPT_FILE, + vpc_config_override=vpc_config, + enable_network_isolation=True, + model_name=model_name, + ) + + create_model.assert_called_with( + role=new_role, + vpc_config_override=vpc_config, + entry_point=SERVING_SCRIPT_FILE, + enable_network_isolation=True, + name=model_name, + ) + model.transformer.assert_called_with( + INSTANCE_COUNT, + INSTANCE_TYPE, + accept=accept_type, + assemble_with=assemble_with, + env=env, + max_concurrent_transforms=max_concurrent_transforms, + max_payload=max_payload, + output_kms_key=kms_key, + output_path=output_path, + strategy=strategy, + tags=tags, + volume_kms_key=kms_key, + ) + + +@patch("sagemaker.tensorflow.estimator.TensorFlow.create_model") +def test_transformer_creation_without_optional_args(create_model, sagemaker_session): + model = Mock() + create_model.return_value = model + + tf = TensorFlow( + entry_point=SCRIPT_PATH, + role=ROLE, + sagemaker_session=sagemaker_session, + train_instance_count=INSTANCE_COUNT, + train_instance_type=INSTANCE_TYPE, + ) + tf.latest_training_job = _TrainingJob(sagemaker_session, "some-job-name") + tf.transformer(INSTANCE_COUNT, INSTANCE_TYPE) + + create_model.assert_called_with( + role=ROLE, + vpc_config_override="VPC_CONFIG_DEFAULT", + entry_point=None, + enable_network_isolation=False, + name=None, + ) + model.transformer.assert_called_with( + INSTANCE_COUNT, + INSTANCE_TYPE, + accept=None, + assemble_with=None, + env=None, + max_concurrent_transforms=None, + max_payload=None, + output_kms_key=None, + output_path=None, + strategy=None, + tags=None, + volume_kms_key=None, + ) + + def test_attach_custom_image(sagemaker_session): training_image = "1.dkr.ecr.us-west-2.amazonaws.com/tensorflow_with_custom_binary:1.0" rjd = { From ae3abd4a73ae4938a64302a09cf016cbf5e6b1b1 Mon Sep 17 00:00:00 2001 From: Lauren Yu <6631887+laurenyu@users.noreply.github.com> Date: Thu, 28 May 2020 17:16:55 -0700 Subject: [PATCH 7/7] black format --- src/sagemaker/workflow/airflow.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/sagemaker/workflow/airflow.py b/src/sagemaker/workflow/airflow.py index 
8ccb15e277..ae4c34fde7 100644 --- a/src/sagemaker/workflow/airflow.py +++ b/src/sagemaker/workflow/airflow.py @@ -649,9 +649,7 @@ def model_config_from_estimator( model = estimator.create_model(vpc_config_override=vpc_config_override) elif isinstance(estimator, TensorFlow): model = estimator.create_model( - role=role, - vpc_config_override=vpc_config_override, - entry_point=estimator.entry_point, + role=role, vpc_config_override=vpc_config_override, entry_point=estimator.entry_point ) elif isinstance(estimator, sagemaker.estimator.Framework): model = estimator.create_model(