
fix: Fix Tensorflow default model_dir generation when output_path is pipeline variable #3146


Merged · 1 commit · Jun 2, 2022
src/sagemaker/tensorflow/estimator.py (4 additions, 0 deletions)

@@ -25,6 +25,7 @@
 from sagemaker.tensorflow.model import TensorFlowModel
 from sagemaker.transformer import Transformer
 from sagemaker.vpc_utils import VPC_CONFIG_DEFAULT
+from sagemaker.workflow import is_pipeline_variable

 logger = logging.getLogger("sagemaker")

@@ -378,6 +379,9 @@ def _default_s3_path(self, directory, mpi=False):
         if mpi:
             return "/opt/ml/model"
         if self._current_job_name:
+            if is_pipeline_variable(self.output_path):
+                output_path = "s3://{}".format(self.sagemaker_session.default_bucket())
Contributor: What happens here if the customer is using a custom S3 bucket?

Member (Author): Thanks for reviewing! This update is within `_default_s3_path`, which generates a default path for the `model_dir` output when the user does not specify one, so I think that's fine.

If users want to use a custom S3 bucket, they should explicitly assign an S3 path to the `model_dir` field of `TensorFlow`.

+                return s3.s3_path_join(output_path, self._current_job_name, directory)
             return s3.s3_path_join(self.output_path, self._current_job_name, directory)
         return None
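To illustrate the behavior being fixed, here is a minimal usage sketch (the script name, role ARN, and custom bucket are hypothetical, not from this PR). When output_path is a pipeline variable, its S3 value is unknown at compile time, so the generated default model_dir now falls back to the session's default bucket:

from sagemaker.tensorflow import TensorFlow
from sagemaker.workflow.parameters import ParameterString
from sagemaker.workflow.pipeline_context import PipelineSession

session = PipelineSession()
output_path = ParameterString(name="OutputPath")

# output_path resolves only at pipeline run time, so the SDK cannot embed it
# in the default model_dir; with this fix it uses the default bucket instead.
estimator = TensorFlow(
    entry_point="train.py",  # hypothetical training script
    role="arn:aws:iam::111122223333:role/SageMakerRole",  # hypothetical role
    instance_type="ml.m5.xlarge",
    instance_count=1,
    framework_version="2.4.1",
    py_version="py37",
    output_path=output_path,
    sagemaker_session=session,
    # To store the model in a custom bucket instead, pass model_dir explicitly:
    # model_dir="s3://my-custom-bucket/model",
)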

tests/integ/sagemaker/workflow/test_model_steps.py (5 additions, 1 deletion)

@@ -608,6 +608,9 @@ def test_model_registration_with_tensorflow_model_with_pipeline_model(
     )
     inputs = TrainingInput(s3_data=input_path)
     instance_count = ParameterInteger(name="InstanceCount", default_value=1)
+    output_path = ParameterString(
+        name="OutputPath", default_value=f"s3://{pipeline_session.default_bucket()}"
+    )

     # If image_uri is not provided, the instance_type should not be a pipeline variable
     # since instance_type is used to retrieve image_uri in compile time (PySDK)
@@ -619,6 +622,7 @@
         framework_version=tf_full_version,
         py_version=tf_full_py_version,
         sagemaker_session=pipeline_session,
+        output_path=output_path,
     )
     train_step_args = tensorflow_estimator.fit(inputs=inputs)
     step_train = TrainingStep(
@@ -648,7 +652,7 @@ def test_model_registration_with_tensorflow_model_with_pipeline_model(
     )
     pipeline = Pipeline(
         name=pipeline_name,
-        parameters=[instance_count],
+        parameters=[instance_count, output_path],
         steps=[step_train, step_register_model],
         sagemaker_session=pipeline_session,
     )
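As a usage note, the new OutputPath parameter keeps its default (the session's default bucket) unless overridden when the pipeline runs. A hedged sketch, assuming the `role` fixture from the test setup and a hypothetical bucket name:

# Create or update the pipeline, then start it with a custom output location
pipeline.upsert(role_arn=role)
execution = pipeline.start(parameters={"OutputPath": "s3://my-custom-bucket/prefix"})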
tests/unit/sagemaker/workflow/test_training_step.py (37 additions, 8 deletions)

@@ -15,6 +15,7 @@
 import os
 import json
 from mock import Mock, PropertyMock
+import re

 import pytest
 import warnings
@@ -163,6 +164,7 @@ def test_training_step_with_estimator(pipeline_session, training_input, hyperpar

 def test_estimator_with_parameterized_output(pipeline_session, training_input):
     output_path = ParameterString(name="OutputPath")
+    # XGBoost
     estimator = XGBoost(
         framework_version="1.3-1",
         py_version="py3",
@@ -174,21 +176,48 @@ def test_estimator_with_parameterized_output(pipeline_session, training_input):
         sagemaker_session=pipeline_session,
     )
     step_args = estimator.fit(inputs=training_input)
-    step = TrainingStep(
-        name="MyTrainingStep",
+    step1 = TrainingStep(
+        name="MyTrainingStep1",
         step_args=step_args,
         description="TrainingStep description",
         display_name="MyTrainingStep",
     )

+    # TensorFlow
+    # If model_dir is None and output_path is a pipeline variable
+    # a default model_dir will be generated with default bucket
+    estimator = TensorFlow(
+        framework_version="2.4.1",
+        py_version="py37",
+        role=ROLE,
+        instance_type=INSTANCE_TYPE,
+        instance_count=1,
+        entry_point=DUMMY_LOCAL_SCRIPT_PATH,
+        output_path=output_path,
+        sagemaker_session=pipeline_session,
+    )
+    step_args = estimator.fit(inputs=training_input)
+    step2 = TrainingStep(
+        name="MyTrainingStep2",
+        step_args=step_args,
+        description="TrainingStep description",
+        display_name="MyTrainingStep",
+    )
     pipeline = Pipeline(
         name="MyPipeline",
-        steps=[step],
+        steps=[step1, step2],
         parameters=[output_path],
         sagemaker_session=pipeline_session,
     )
-    step_def = json.loads(pipeline.definition())["Steps"][0]
-    assert step_def["Arguments"]["OutputDataConfig"]["S3OutputPath"] == {
-        "Get": "Parameters.OutputPath"
-    }
+    step_defs = json.loads(pipeline.definition())["Steps"]
+    for step_def in step_defs:
+        assert step_def["Arguments"]["OutputDataConfig"]["S3OutputPath"] == {
+            "Get": "Parameters.OutputPath"
+        }
+        if step_def["Name"] != "MyTrainingStep2":
+            continue
+        model_dir = step_def["Arguments"]["HyperParameters"]["model_dir"]
+        assert re.match(rf'"s3://{BUCKET}/.*/model"', model_dir)


 @pytest.mark.parametrize(

@@ -316,7 +345,7 @@ def test_training_step_with_algorithm_base(algo_estimator, pipeline_session):
         sagemaker_session=pipeline_session,
     )
     data = RecordSet(
-        "s3://{}/{}".format(pipeline_session.default_bucket(), "dummy"),
+        "s3://{}/{}".format(BUCKET, "dummy"),
         num_records=1000,
         feature_dim=128,
         channel="train",
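As background for the model_dir assertion above, a minimal sketch (semantics inferred from the import added in this PR, simplified) of what is_pipeline_variable distinguishes:

from sagemaker.workflow import is_pipeline_variable
from sagemaker.workflow.parameters import ParameterString

# A ParameterString resolves to a concrete S3 URI only at pipeline run time,
# so compile-time string formatting on it would produce a broken path.
assert is_pipeline_variable(ParameterString(name="OutputPath"))

# A literal S3 URI is a plain string and can be used directly.
assert not is_pipeline_variable("s3://some-bucket/prefix")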