
Commit f15343c

Authored by Rui Wang Napieralski (icywang86rui)
change: remove slow test_run_xxx_monitor_baseline tests (#2148)
* change: remove slow test_run_xxx_monitor_baseline tests

* fix pylint error

Co-authored-by: Rui Wang Napieralski <[email protected]>
1 parent 0c305db commit f15343c

File tree

2 files changed: +1 −122 lines

src/sagemaker/analytics.py

Lines changed: 1 addition & 0 deletions
@@ -43,6 +43,7 @@ class AnalyticsMetricsBase(with_metaclass(ABCMeta, object)):
     """

     def __init__(self):
+        """Initializes ``AnalyticsMetricsBase`` instance."""
         self._dataframe = None

     def export_csv(self, filename):

tests/integ/test_clarify_model_monitor.py

Lines changed: 0 additions & 122 deletions
@@ -304,76 +304,6 @@ def test_run_bias_monitor(
     scheduled_bias_monitor.delete_monitoring_schedule()


-@pytest.mark.skipif(
-    tests.integ.test_region() in tests.integ.NO_MODEL_MONITORING_REGIONS,
-    reason="ModelMonitoring is not yet supported in this region.",
-)
-def test_run_bias_monitor_baseline(
-    sagemaker_session,
-    data_config,
-    model_config,
-    bias_config,
-    model_predicted_label_config,
-    endpoint_name,
-    ground_truth_input,
-    upload_actual_data,
-):
-    monitor = ModelBiasMonitor(
-        role=ROLE,
-        instance_count=INSTANCE_COUNT,
-        instance_type=INSTANCE_TYPE,
-        volume_size_in_gb=VOLUME_SIZE_IN_GB,
-        max_runtime_in_seconds=MAX_RUNTIME_IN_SECONDS,
-        sagemaker_session=sagemaker_session,
-        tags=TEST_TAGS,
-    )
-
-    baselining_job_name = utils.unique_name_from_base("bias-baselining-job")
-    print("Creating baselining job: {}".format(baselining_job_name))
-    monitor.suggest_baseline(
-        data_config=data_config,
-        bias_config=bias_config,
-        model_config=model_config,
-        model_predicted_label_config=model_predicted_label_config,
-        job_name=baselining_job_name,
-    )
-    assert (
-        monitor.latest_baselining_job_config.probability_threshold_attribute
-        == BIAS_PROBABILITY_THRESHOLD
-    )
-    monitoring_schedule_name = utils.unique_name_from_base("bias-suggest-baseline")
-    s3_uri_monitoring_output = os.path.join(
-        "s3://",
-        sagemaker_session.default_bucket(),
-        endpoint_name,
-        monitoring_schedule_name,
-        "monitor_output",
-    )
-    # Let's test if the schedule can pick up analysis_config from baselining job
-    monitor.create_monitoring_schedule(
-        output_s3_uri=s3_uri_monitoring_output,
-        monitor_schedule_name=monitoring_schedule_name,
-        endpoint_input=EndpointInput(
-            endpoint_name=endpoint_name,
-            destination=ENDPOINT_INPUT_LOCAL_PATH,
-            start_time_offset=START_TIME_OFFSET,
-            end_time_offset=END_TIME_OFFSET,
-        ),
-        ground_truth_input=ground_truth_input,
-        schedule_cron_expression=CRON,
-    )
-    _verify_execution_status(monitor)
-
-    _verify_bias_job_description(
-        sagemaker_session=sagemaker_session,
-        monitor=monitor,
-        endpoint_name=endpoint_name,
-        ground_truth_input=ground_truth_input,
-    )
-
-    monitor.delete_monitoring_schedule()
-
-
 @pytest.fixture
 def explainability_monitor(sagemaker_session):
     monitor = ModelExplainabilityMonitor(
@@ -485,58 +415,6 @@ def test_run_explainability_monitor(
     scheduled_explainability_monitor.delete_monitoring_schedule()


-@pytest.mark.skipif(
-    tests.integ.test_region() in tests.integ.NO_MODEL_MONITORING_REGIONS,
-    reason="ModelMonitoring is not yet supported in this region.",
-)
-def test_run_explainability_monitor_baseline(
-    sagemaker_session, shap_config, data_config, model_config, endpoint_name, upload_actual_data
-):
-    monitor = ModelExplainabilityMonitor(
-        role=ROLE,
-        instance_count=INSTANCE_COUNT,
-        instance_type=INSTANCE_TYPE,
-        volume_size_in_gb=VOLUME_SIZE_IN_GB,
-        max_runtime_in_seconds=MAX_RUNTIME_IN_SECONDS,
-        sagemaker_session=sagemaker_session,
-        tags=TEST_TAGS,
-    )
-
-    baselining_job_name = utils.unique_name_from_base("explainability-baselining-job")
-    print("Creating baselining job: {}".format(baselining_job_name))
-    monitor.suggest_baseline(
-        data_config=data_config,
-        explainability_config=shap_config,
-        model_config=model_config,
-        job_name=baselining_job_name,
-    )
-    monitoring_schedule_name = utils.unique_name_from_base("explainability-suggest-baseline")
-    s3_uri_monitoring_output = os.path.join(
-        "s3://",
-        sagemaker_session.default_bucket(),
-        endpoint_name,
-        monitoring_schedule_name,
-        "monitor_output",
-    )
-    # Let's test if the schedule can pick up analysis_config from baselining job
-    monitor.create_monitoring_schedule(
-        output_s3_uri=s3_uri_monitoring_output,
-        monitor_schedule_name=monitoring_schedule_name,
-        endpoint_input=endpoint_name,
-        schedule_cron_expression=CRON,
-    )
-
-    _verify_execution_status(monitor)
-
-    _verify_explainability_job_description(
-        sagemaker_session=sagemaker_session,
-        monitor=monitor,
-        endpoint_name=endpoint_name,
-    )
-
-    monitor.delete_monitoring_schedule()
-
-
 def _verify_monitoring_schedule(monitor, schedule_status, schedule_cron_expression=CRON):
     desc = monitor.describe_schedule()
     assert desc["MonitoringScheduleName"] == monitor.monitoring_schedule_name
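For reference, the deleted tests exercised the suggest-a-baseline-then-schedule flow of the Clarify model monitors. Below is a minimal stand-alone sketch of that flow, reconstructed from the deleted bias test; the role ARN, bucket layout, endpoint and model names, and dataset schema are illustrative placeholders rather than values from this commit, and running it still launches real (slow) SageMaker jobs, which is why the tests were removed.

# Minimal sketch of the deleted bias-baseline flow. All literal values
# (role, S3 paths, endpoint/model names, schema) are placeholders; the
# API calls mirror the deleted test body.
import sagemaker
from sagemaker.clarify import BiasConfig, DataConfig, ModelConfig, ModelPredictedLabelConfig
from sagemaker.model_monitor import CronExpressionGenerator, EndpointInput, ModelBiasMonitor

session = sagemaker.Session()
bucket = session.default_bucket()

monitor = ModelBiasMonitor(
    role="arn:aws:iam::111122223333:role/SageMakerRole",  # placeholder role
    instance_count=1,
    instance_type="ml.c5.xlarge",
    volume_size_in_gb=30,
    max_runtime_in_seconds=1800,
    sagemaker_session=session,
)

data_config = DataConfig(
    s3_data_input_path="s3://{}/bias/train.csv".format(bucket),  # placeholder dataset
    s3_output_path="s3://{}/bias/baselining_output".format(bucket),
    label="Label",
    headers=["Label", "F1", "F2", "F3"],  # placeholder schema
    dataset_type="text/csv",
)
bias_config = BiasConfig(label_values_or_threshold=[1], facet_name="F1")
model_config = ModelConfig(
    model_name="my-model",  # placeholder model
    instance_count=1,
    instance_type="ml.c5.xlarge",
    content_type="text/csv",
    accept_type="application/jsonlines",
)
model_predicted_label_config = ModelPredictedLabelConfig(probability_threshold=0.8)

# The baselining job writes a suggested analysis_config to S3.
monitor.suggest_baseline(
    data_config=data_config,
    bias_config=bias_config,
    model_config=model_config,
    model_predicted_label_config=model_predicted_label_config,
    job_name="bias-baselining-job",
)

# The schedule then picks up the analysis_config from the latest baselining job.
monitor.create_monitoring_schedule(
    output_s3_uri="s3://{}/bias/monitor_output".format(bucket),
    monitor_schedule_name="bias-suggest-baseline",
    endpoint_input=EndpointInput(
        endpoint_name="my-endpoint",  # placeholder endpoint
        destination="/opt/ml/processing/input/endpoint",  # placeholder local path
        start_time_offset="-PT1H",  # placeholder offsets
        end_time_offset="-PT0H",
    ),
    ground_truth_input="s3://{}/bias/ground_truth".format(bucket),
    schedule_cron_expression=CronExpressionGenerator.hourly(),
)

The deleted explainability test had the same shape: suggest_baseline takes an explainability_config (e.g. a sagemaker.clarify.SHAPConfig) in place of bias_config and model_predicted_label_config, and create_monitoring_schedule can take the endpoint name directly as endpoint_input.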
