
Commit 62ce65a

Merge branch 'master' into fix/pipeline-variable-kms-key
2 parents: ec7bf63 + 59bace4

File tree: 11 files changed (+186 -13 lines changed)

CHANGELOG.md (+6)

@@ -1,5 +1,11 @@
 # Changelog
 
+## v2.177.1 (2023-08-14)
+
+### Bug Fixes and Other Changes
+
+* chore: excessive jumpstart bucket logging
+
 ## v2.177.0 (2023-08-11)
 
 ### Features

VERSION (+1 -1)

@@ -1 +1 @@
-2.177.1.dev0
+2.177.2.dev0

src/sagemaker/jumpstart/model.py (+33 -6)

@@ -310,13 +310,34 @@ def _is_valid_model_id_hook():
 
         super(JumpStartModel, self).__init__(**model_init_kwargs.to_kwargs_dict())
 
-    def _create_sagemaker_model(self, *args, **kwargs):  # pylint: disable=unused-argument
+    def _create_sagemaker_model(
+        self,
+        instance_type=None,
+        accelerator_type=None,
+        tags=None,
+        serverless_inference_config=None,
+        **kwargs,
+    ):
         """Create a SageMaker Model Entity
 
         Args:
-            args: Positional arguments coming from the caller. This class does not require
-                any so they are ignored.
-
+            instance_type (str): Optional. The EC2 instance type that this Model will be
+                used for, this is only used to determine if the image needs GPU
+                support or not. (Default: None).
+            accelerator_type (str): Optional. Type of Elastic Inference accelerator to
+                attach to an endpoint for model loading and inference, for
+                example, 'ml.eia1.medium'. If not specified, no Elastic
+                Inference accelerator will be attached to the endpoint. (Default: None).
+            tags (List[dict[str, str]]): Optional. The list of tags to add to
+                the model. Example: >>> tags = [{'Key': 'tagname', 'Value':
+                'tagvalue'}] For more information about tags, see
+                https://boto3.amazonaws.com/v1/documentation
+                /api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
+                (Default: None).
+            serverless_inference_config (sagemaker.serverless.ServerlessInferenceConfig):
+                Optional. Specifies configuration related to serverless endpoint. Instance type is
+                not provided in serverless inference. So this is used to find image URIs.
+                (Default: None).
             kwargs: Keyword arguments coming from the caller. This class does not require
                 any so they are ignored.
         """
@@ -347,10 +368,16 @@ def _create_sagemaker_model(self, *args, **kwargs):  # pylint: disable=unused-ar
                 container_def,
                 vpc_config=self.vpc_config,
                 enable_network_isolation=self.enable_network_isolation(),
-                tags=kwargs.get("tags"),
+                tags=tags,
             )
         else:
-            super(JumpStartModel, self)._create_sagemaker_model(*args, **kwargs)
+            super(JumpStartModel, self)._create_sagemaker_model(
+                instance_type=instance_type,
+                accelerator_type=accelerator_type,
+                tags=tags,
+                serverless_inference_config=serverless_inference_config,
+                **kwargs,
+            )
 
     def deploy(
         self,
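
With this change, arguments such as tags passed to JumpStartModel.deploy() travel by name through _create_sagemaker_model() down to create_model(), instead of being dropped by the old kwargs.get("tags") lookup. A minimal usage sketch (the model ID and tag values below are illustrative, not taken from this commit):

    from sagemaker.jumpstart.model import JumpStartModel

    # Any JumpStart model ID works here; this one is only an example.
    model = JumpStartModel(model_id="huggingface-text2text-flan-t5-base")

    # tags now reach the created SageMaker Model resource as well as the endpoint.
    predictor = model.deploy(
        initial_instance_count=1,
        tags=[{"Key": "team", "Value": "ml-platform"}],
    )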

src/sagemaker/model.py (+4 -1)

@@ -1375,7 +1375,10 @@ def deploy(
             self._base_name = "-".join((self._base_name, compiled_model_suffix))
 
         self._create_sagemaker_model(
-            instance_type, accelerator_type, tags, serverless_inference_config
+            instance_type=instance_type,
+            accelerator_type=accelerator_type,
+            tags=tags,
+            serverless_inference_config=serverless_inference_config,
        )
 
        serverless_inference_config_dict = (
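
Calling _create_sagemaker_model with keyword arguments matters because an override that only inspects **kwargs, as the previous JumpStartModel implementation did with kwargs.get("tags"), never sees values that arrive positionally. A stripped-down sketch of that failure mode (class and method names here are made up for illustration):

    class Base:
        def _create(self, instance_type=None, accelerator_type=None, tags=None):
            pass

    class Child(Base):
        # Override that only looks at **kwargs, like the old JumpStart code path.
        def _create(self, *args, **kwargs):
            return kwargs.get("tags")

    child = Child()
    child._create("ml.m5.xlarge", None, [{"Key": "k", "Value": "v"}])               # returns None
    child._create(instance_type="ml.m5.xlarge", tags=[{"Key": "k", "Value": "v"}])  # returns the tags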

src/sagemaker/model_monitor/clarify_model_monitoring.py (+24)

@@ -27,6 +27,7 @@
 from sagemaker.session import Session
 from sagemaker.utils import name_from_base
 from sagemaker.clarify import SageMakerClarifyProcessor, ModelPredictedLabelConfig
+from sagemaker.lineage._utils import get_resource_name_from_arn
 
 _LOGGER = logging.getLogger(__name__)
 
@@ -154,6 +155,29 @@ def list_executions(self):
             for execution in executions
         ]
 
+    def get_latest_execution_logs(self, wait=False):
+        """Get the processing job logs for the most recent monitoring execution
+
+        Args:
+            wait (bool): Whether the call should wait until the job completes (default: False).
+
+        Raises:
+            ValueError: If no execution job or processing job for the last execution has run
+
+        Returns: None
+        """
+        monitoring_executions = self.sagemaker_session.list_monitoring_executions(
+            monitoring_schedule_name=self.monitoring_schedule_name
+        )
+        if len(monitoring_executions["MonitoringExecutionSummaries"]) == 0:
+            raise ValueError("No execution jobs were kicked off.")
+        if "ProcessingJobArn" not in monitoring_executions["MonitoringExecutionSummaries"][0]:
+            raise ValueError("Processing Job did not run for the last execution")
+        job_arn = monitoring_executions["MonitoringExecutionSummaries"][0]["ProcessingJobArn"]
+        self.sagemaker_session.logs_for_processing_job(
+            job_name=get_resource_name_from_arn(job_arn), wait=wait
+        )
+
     def _create_baselining_processor(self):
         """Create and return a SageMakerClarifyProcessor object which will run the baselining job.
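
Clarify-based monitors gain the same helper, so the latest run's logs can be pulled without digging through the console. A minimal sketch, assuming explainability_monitor is an existing ModelExplainabilityMonitor whose monitoring schedule has already been created:

    # explainability_monitor is assumed to be a ModelExplainabilityMonitor
    # attached to a monitoring schedule.
    try:
        explainability_monitor.get_latest_execution_logs(wait=False)
    except ValueError as err:
        # Raised when no execution has run yet, or when the latest execution
        # produced no processing job.
        print(f"No logs available yet: {err}")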

src/sagemaker/model_monitor/model_monitoring.py (+24)

@@ -63,6 +63,7 @@
     resolve_value_from_config,
     resolve_class_attribute_from_config,
 )
+from sagemaker.lineage._utils import get_resource_name_from_arn
 
 DEFAULT_REPOSITORY_NAME = "sagemaker-model-monitor-analyzer"
 
@@ -768,6 +769,29 @@ def list_executions(self):
 
         return monitoring_executions
 
+    def get_latest_execution_logs(self, wait=False):
+        """Get the processing job logs for the most recent monitoring execution
+
+        Args:
+            wait (bool): Whether the call should wait until the job completes (default: False).
+
+        Raises:
+            ValueError: If no execution job or processing job for the last execution has run
+
+        Returns: None
+        """
+        monitoring_executions = self.sagemaker_session.list_monitoring_executions(
+            monitoring_schedule_name=self.monitoring_schedule_name
+        )
+        if len(monitoring_executions["MonitoringExecutionSummaries"]) == 0:
+            raise ValueError("No execution jobs were kicked off.")
+        if "ProcessingJobArn" not in monitoring_executions["MonitoringExecutionSummaries"][0]:
+            raise ValueError("Processing Job did not run for the last execution")
+        job_arn = monitoring_executions["MonitoringExecutionSummaries"][0]["ProcessingJobArn"]
+        self.sagemaker_session.logs_for_processing_job(
+            job_name=get_resource_name_from_arn(job_arn), wait=wait
+        )
+
     def update_monitoring_alert(
         self,
         monitoring_alert_name: str,
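
For the non-Clarify monitors the call works the same way. A hedged usage sketch, assuming a data-quality monitoring schedule with this (illustrative) name already exists and has executed at least once:

    from sagemaker.model_monitor import DefaultModelMonitor

    # Attach to an existing schedule; the name below is only an example.
    my_monitor = DefaultModelMonitor.attach(monitor_schedule_name="my-monitoring-schedule")

    # Streams the latest execution's processing-job logs to stdout;
    # wait=True blocks until that job finishes.
    my_monitor.get_latest_execution_logs(wait=True)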

tests/integ/test_model_monitor.py (+44)

@@ -486,6 +486,38 @@ def test_default_monitor_suggest_baseline_and_create_monitoring_schedule_with_cu
     assert len(summary["MonitoringScheduleSummaries"]) > 0
 
 
+def test_default_monitor_display_logs_errors(sagemaker_session):
+    my_default_monitor = DefaultModelMonitor(role=ROLE, sagemaker_session=sagemaker_session)
+
+    data_captured_destination_s3_uri = os.path.join(
+        "s3://",
+        sagemaker_session.default_bucket(),
+        "sagemaker-serving-batch-transform",
+        str(uuid.uuid4()),
+    )
+
+    batch_transform_input = BatchTransformInput(
+        data_captured_destination_s3_uri=data_captured_destination_s3_uri,
+        destination="/opt/ml/processing/output",
+        dataset_format=MonitoringDatasetFormat.csv(header=False),
+    )
+
+    my_default_monitor.create_monitoring_schedule(
+        batch_transform_input=batch_transform_input,
+        schedule_cron_expression=CronExpressionGenerator.hourly(),
+    )
+
+    _wait_for_schedule_changes_to_apply(monitor=my_default_monitor)
+
+    try:
+        my_default_monitor.get_latest_execution_logs(wait=False)
+    except ValueError as ve:
+        assert "No execution jobs were kicked off." in str(ve)
+
+    my_default_monitor.stop_monitoring_schedule()
+    my_default_monitor.delete_monitoring_schedule()
+
+
 @pytest.mark.skipif(
     tests.integ.test_region() in tests.integ.NO_MODEL_MONITORING_REGIONS,
     reason="ModelMonitoring is not yet supported in this region.",
@@ -1643,6 +1675,7 @@ def test_byoc_monitor_attach_followed_by_baseline_and_update_monitoring_schedule
     output_kms_key,
     updated_volume_kms_key,
     updated_output_kms_key,
+    capfd,
 ):
     baseline_dataset = os.path.join(DATA_DIR, "monitor/baseline_dataset.csv")
 
@@ -1771,6 +1804,10 @@ def test_byoc_monitor_attach_followed_by_baseline_and_update_monitoring_schedule
 
     _wait_for_schedule_changes_to_apply(monitor=my_attached_monitor)
 
+    _check_processing_logs_generated(
+        monitor=my_attached_monitor, schedule_description=schedule_description, capfd=capfd
+    )
+
     my_attached_monitor.stop_monitoring_schedule()
 
     _wait_for_schedule_changes_to_apply(monitor=my_attached_monitor)
@@ -1877,6 +1914,13 @@ def test_default_monitor_monitoring_alerts(sagemaker_session, predictor):
     my_default_monitor.delete_monitoring_schedule()
 
 
+def _check_processing_logs_generated(monitor, schedule_description, capfd):
+    monitor.get_latest_execution_logs(wait=False)
+    out, _ = capfd.readouterr()
+    assert len(out) > 0
+    assert schedule_description.get("LastMonitoringExecutionSummary")["ProcessingJobArn"] in out
+
+
 def _wait_for_schedule_changes_to_apply(monitor):
     """Waits for the monitor to no longer be in the 'Pending' state. Updates take under a minute
     to apply.

tests/unit/sagemaker/jumpstart/model/test_model.py (+6 -1)

@@ -589,7 +589,10 @@ def test_jumpstart_model_package_arn(
 
         model = JumpStartModel(model_id=model_id)
 
-        model.deploy()
+        tag = {"Key": "foo", "Value": "bar"}
+        tags = [tag]
+
+        model.deploy(tags=tags)
 
         self.assertEqual(
             mock_session.return_value.create_model.call_args[0][2],
@@ -599,6 +602,8 @@ def test_jumpstart_model_package_arn(
             },
         )
 
+        self.assertIn(tag, mock_session.return_value.create_model.call_args[1]["tags"])
+
     @mock.patch("sagemaker.jumpstart.model.is_valid_model_id")
     @mock.patch("sagemaker.jumpstart.factory.model.Session")
     @mock.patch("sagemaker.jumpstart.accessors.JumpStartModelsAccessor.get_model_specs")

tests/unit/sagemaker/model/test_deploy.py (+18 -3)

@@ -159,7 +159,12 @@ def test_deploy_accelerator_type(
         accelerator_type=ACCELERATOR_TYPE,
     )
 
-    create_sagemaker_model.assert_called_with(INSTANCE_TYPE, ACCELERATOR_TYPE, None, None)
+    create_sagemaker_model.assert_called_with(
+        instance_type=INSTANCE_TYPE,
+        accelerator_type=ACCELERATOR_TYPE,
+        tags=None,
+        serverless_inference_config=None,
+    )
     production_variant.assert_called_with(
         MODEL_NAME,
         INSTANCE_TYPE,
@@ -271,7 +276,12 @@ def test_deploy_tags(create_sagemaker_model, production_variant, name_from_base,
     tags = [{"Key": "ModelName", "Value": "TestModel"}]
     model.deploy(instance_type=INSTANCE_TYPE, initial_instance_count=INSTANCE_COUNT, tags=tags)
 
-    create_sagemaker_model.assert_called_with(INSTANCE_TYPE, None, tags, None)
+    create_sagemaker_model.assert_called_with(
+        instance_type=INSTANCE_TYPE,
+        accelerator_type=None,
+        tags=tags,
+        serverless_inference_config=None,
+    )
     sagemaker_session.endpoint_from_production_variants.assert_called_with(
         name=ENDPOINT_NAME,
         production_variants=[BASE_PRODUCTION_VARIANT],
@@ -463,7 +473,12 @@ def test_deploy_serverless_inference(production_variant, create_sagemaker_model,
         serverless_inference_config=serverless_inference_config,
     )
 
-    create_sagemaker_model.assert_called_with(None, None, None, serverless_inference_config)
+    create_sagemaker_model.assert_called_with(
+        instance_type=None,
+        accelerator_type=None,
+        tags=None,
+        serverless_inference_config=serverless_inference_config,
+    )
     production_variant.assert_called_with(
         MODEL_NAME,
         None,

tests/unit/sagemaker/model/test_model_package.py (+1 -1)

@@ -197,7 +197,7 @@ def test_create_sagemaker_model_include_tags(sagemaker_session):
         sagemaker_session=sagemaker_session,
     )
 
-    model_package._create_sagemaker_model(tags=tags)
+    model_package.deploy(tags=tags, instance_type="ml.p2.xlarge", initial_instance_count=1)
 
     sagemaker_session.create_model.assert_called_with(
         model_name,

tests/unit/sagemaker/monitor/test_clarify_model_monitor.py (+25)

@@ -221,6 +221,14 @@
     "NetworkConfig": NETWORK_CONFIG._to_request_dict(),
 }
 
+MONITORING_EXECUTIONS_EMPTY = {
+    "MonitoringExecutionSummaries": [],
+}
+
+MONITORING_EXECUTIONS_NO_PROCESSING_JOB = {
+    "MonitoringExecutionSummaries": [{"MonitoringSchedule": "MonitoringSchedule"}],
+}
+
 # For update API
 NEW_ROLE_ARN = "arn:aws:iam::012345678902:role/{}".format(ROLE)
 NEW_INSTANCE_COUNT = 2
@@ -1716,3 +1724,20 @@ def _test_model_explainability_monitor_delete_schedule(
     sagemaker_session.sagemaker_client.delete_model_explainability_job_definition.assert_called_once_with(
         JobDefinitionName=job_definition_name
     )
+
+
+def test_model_explainability_monitor_logs_failure(model_explainability_monitor, sagemaker_session):
+    sagemaker_session.list_monitoring_executions = MagicMock(
+        return_value=MONITORING_EXECUTIONS_EMPTY
+    )
+    try:
+        model_explainability_monitor.get_latest_execution_logs()
+    except ValueError as ve:
+        assert "No execution jobs were kicked off." in str(ve)
+    sagemaker_session.list_monitoring_executions = MagicMock(
+        return_value=MONITORING_EXECUTIONS_NO_PROCESSING_JOB
+    )
+    try:
+        model_explainability_monitor.get_latest_execution_logs()
+    except ValueError as ve:
+        assert "Processing Job did not run for the last execution" in str(ve)
