diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index d21d3f80ee..cd4098a81d 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -2,6 +2,11 @@
 CHANGELOG
 =========
 
+1.16.2
+======
+
+* doc-fix: Change ``distribution`` to ``distributions``
+
 1.16.1.post1
 ============
 
diff --git a/src/sagemaker/mxnet/README.rst b/src/sagemaker/mxnet/README.rst
index 00926965a6..631b3f9665 100644
--- a/src/sagemaker/mxnet/README.rst
+++ b/src/sagemaker/mxnet/README.rst
@@ -209,7 +209,7 @@ If you were previously relying on the default save method, you can now import on
 
     save(args.model_dir, model)
 
-Lastly, if you were relying on the container launching a parameter server for use with distributed training, you must now set ``distribution`` to the following dictionary when creating an MXNet estimator:
+Lastly, if you were relying on the container launching a parameter server for use with distributed training, you must now set ``distributions`` to the following dictionary when creating an MXNet estimator:
 
 .. code:: python
 
@@ -217,7 +217,7 @@ Lastly, if you were relying on the container launching a parameter server for us
 
     estimator = MXNet('path-to-distributed-training-script.py',
                       ...,
-                      distribution={'parameter_server': {'enabled': True}})
+                      distributions={'parameter_server': {'enabled': True}})
 
 
 Using third-party libraries
@@ -323,7 +323,7 @@ The following are optional arguments. When you create an ``MXNet`` object, you c
   framework_version and py_version. Refer to: `SageMaker MXNet Docker Containers
   <#sagemaker-mxnet-docker-containers>`_ for details on what the Official images support
   and where to find the source code to build your custom image.
-- ``distribution`` For versions 1.3 and above only.
+- ``distributions`` For versions 1.3 and above only.
   Specifies information for how to run distributed training.
   To launch a parameter server during training, set this argument to:
 
diff --git a/src/sagemaker/mxnet/estimator.py b/src/sagemaker/mxnet/estimator.py
index a4c1c1fabd..adc056e66e 100644
--- a/src/sagemaker/mxnet/estimator.py
+++ b/src/sagemaker/mxnet/estimator.py
@@ -67,7 +67,7 @@ def __init__(self, entry_point, source_dir=None, hyperparameters=None, py_versio
                 Examples:
                     123.dkr.ecr.us-west-2.amazonaws.com/my-custom-image:1.0
                     custom-image:latest.
-            distribution (dict): A dictionary with information on how to run distributed training
+            distributions (dict): A dictionary with information on how to run distributed training
                 (default: None).
             **kwargs: Additional kwargs passed to the :class:`~sagemaker.estimator.Framework` constructor.
         """
diff --git a/src/sagemaker/tensorflow/estimator.py b/src/sagemaker/tensorflow/estimator.py
index 34d5d2bcb4..ab7a249fc9 100644
--- a/src/sagemaker/tensorflow/estimator.py
+++ b/src/sagemaker/tensorflow/estimator.py
@@ -199,7 +199,7 @@ def __init__(self, training_steps=None, evaluation_steps=None, checkpoint_path=N
                 custom-image:latest.
             script_mode (bool): If set to True will the estimator will use the Script Mode containers (default: False).
                 This will be ignored if py_version is set to 'py3'.
-            distribution (dict): A dictionary with information on how to run distributed training
+            distributions (dict): A dictionary with information on how to run distributed training
                 (default: None). Currently we only support distributed training with parameter servers. To enable it
                 use the following setup:
                 {
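
For reference, a minimal sketch of how the corrected ``distributions`` argument is passed when constructing an MXNet estimator. The entry point, IAM role, instance settings, and S3 input below are placeholder assumptions for illustration, not values taken from this diff; only the ``distributions={'parameter_server': {'enabled': True}}`` setting and the MXNet 1.3+ requirement come from the documentation being fixed.

.. code:: python

    from sagemaker.mxnet import MXNet

    # Placeholder values; substitute your own script, role, and instance configuration.
    estimator = MXNet(entry_point='train.py',            # hypothetical training script
                      role='MySageMakerRole',            # hypothetical IAM role
                      train_instance_count=2,            # more than one instance for distributed training
                      train_instance_type='ml.m4.xlarge',
                      framework_version='1.3.0',         # parameter servers require version 1.3 and above
                      # Keyword as documented after this fix; the docs previously said ``distribution``.
                      distributions={'parameter_server': {'enabled': True}})

    estimator.fit('s3://my-bucket/my-training-data')     # hypothetical S3 input

Because the CHANGELOG entry is tagged ``doc-fix``, only the documentation and docstrings change here; the estimators' keyword argument itself is ``distributions``.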