@@ -331,11 +331,11 @@ def __init__(
     s3_analysis_config_output_path (str): S3 prefix to store the analysis config output.
         If this field is None, then the ``s3_output_path`` will be used
         to store the ``analysis_config`` output.
-    label (str): Target attribute of the model required by bias metrics.
-        Specified as column name or index for CSV dataset or as JSONPath for JSONLines.
+    label (str): Target attribute of the model required by bias metrics. Specified as
+        column name or index for CSV dataset or as JMESPath expression for JSONLines.
         *Required parameter* except for when the input dataset does not contain the label.
-    features (List[str]): JSONPath for locating the feature columns for bias metrics if the
-        dataset format is JSONLines.
+    features (List[str]): JMESPath expression to locate the feature columns for
+        bias metrics if the dataset format is JSONLines.
     dataset_type (str): Format of the dataset. Valid values are ``"text/csv"`` for CSV,
         ``"application/jsonlines"`` for JSONLines, and
         ``"application/x-parquet"`` for Parquet.
@@ -717,11 +717,11 @@ def __init__(
     ``label_headers=['cat','dog','fish']`` and infer the predicted label to be ``'fish'``.
 
     Args:
-        label (str or int): Index or JSONPath location in the model output for the prediction.
-            In case, this is a predicted label of the same type as the label in the dataset,
-            no further arguments need to be specified.
-        probability (str or int): Index or JSONPath location in the model output
-            for the predicted score(s) .
+        label (str or int): Index or JMESPath expression to locate the prediction
+            in the model output. In case this is a predicted label of the same type
+            as the label in the dataset, no further arguments need to be specified.
+        probability (str or int): Index or JMESPath expression to locate the predicted score(s)
+            in the model output.
         probability_threshold (float): An optional value for binary prediction tasks in which
             the model returns a probability, to indicate the threshold to convert the
             prediction to a boolean value. Default is ``0.5``.
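
The same JMESPath convention applies when describing the model output. A hedged sketch, assuming the model returns JSONLines records such as `{"predicted_label": "dog", "score": 0.92}`; the key names are assumptions, not part of this diff.

```python
from sagemaker import clarify

# Hedged sketch: the "predicted_label" and "score" keys are assumed
# names for the model's JSONLines output fields.
predictions_config = clarify.ModelPredictedLabelConfig(
    label="predicted_label",      # JMESPath expression locating the prediction
    probability="score",          # JMESPath expression locating the predicted score
    probability_threshold=0.8,    # scores above 0.8 map to the positive label
)

# For CSV model output, plain indices work the same way, e.g.
# clarify.ModelPredictedLabelConfig(probability=0, probability_threshold=0.8)
```
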
@@ -1646,9 +1646,9 @@ def run_explainability(
         You can request multiple methods at once by passing in a list of
         `~sagemaker.clarify.ExplainabilityConfig`.
     model_scores (int or str or :class:`~sagemaker.clarify.ModelPredictedLabelConfig`):
-        Index or JSONPath to locate the predicted scores in the model output. This is not
-        required if the model output is a single score. Alternatively, it can be an instance
-        of :class:`~sagemaker.clarify.SageMakerClarifyProcessor`
+        Index or JMESPath expression to locate the predicted scores in the model output.
+        This is not required if the model output is a single score. Alternatively,
+        it can be an instance of :class:`~sagemaker.clarify.ModelPredictedLabelConfig`
         to provide more parameters like ``label_headers``.
     wait (bool): Whether the call should wait until the job completes (default: True).
     logs (bool): Whether to show the logs produced by the job.
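
A hedged sketch of passing `model_scores` to `run_explainability`; the role ARN, model name, instance settings, and SHAP baseline are placeholders, and `data_config` refers to the object from the earlier sketch.

```python
from sagemaker import clarify

# Placeholders throughout: role ARN, model name, instance types, baseline.
processor = clarify.SageMakerClarifyProcessor(
    role="arn:aws:iam::111122223333:role/ClarifyRole",
    instance_count=1,
    instance_type="ml.m5.xlarge",
)
model_config = clarify.ModelConfig(
    model_name="my-model",
    instance_count=1,
    instance_type="ml.m5.xlarge",
    accept_type="application/jsonlines",
)
shap_config = clarify.SHAPConfig(
    baseline=[[0.5] * 10],        # placeholder baseline record
    num_samples=100,
    agg_method="mean_abs",
)
processor.run_explainability(
    data_config=data_config,      # DataConfig from the earlier sketch
    model_config=model_config,
    explainability_config=shap_config,
    model_scores="score",         # JMESPath expression locating the predicted scores
)
```
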
@@ -1775,9 +1775,9 @@ def run_bias_and_explainability(
             str or
             :class:`~sagemaker.clarify.ModelPredictedLabelConfig`
         ):
-        Index or JSONPath to locate the predicted scores in the model output. This is not
-        required if the model output is a single score. Alternatively, it can be an instance
-        of :class:`~sagemaker.clarify.SageMakerClarifyProcessor`
+        Index or JMESPath expression to locate the predicted scores in the model output.
+        This is not required if the model output is a single score. Alternatively,
+        it can be an instance of :class:`~sagemaker.clarify.ModelPredictedLabelConfig`
         to provide more parameters like ``label_headers``.
     wait (bool): Whether the call should wait until the job completes (default: True).
     logs (bool): Whether to show the logs produced by the job.
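
A corresponding sketch for the combined job, passing a `ModelPredictedLabelConfig` instance instead of a bare index or JMESPath string; the keyword name `model_predicted_label_config` and the `BiasConfig` facet values are assumptions, not part of this diff, and the other objects are reused from the sketches above.

```python
from sagemaker import clarify

# Assumption: the keyword accepting the ModelPredictedLabelConfig instance
# is named model_predicted_label_config; facet values are placeholders.
bias_config = clarify.BiasConfig(
    label_values_or_threshold=[1],    # positive outcome value(s)
    facet_name="age",                 # hypothetical sensitive attribute
    facet_values_or_threshold=[40],
)
processor.run_bias_and_explainability(
    data_config=data_config,
    model_config=model_config,
    explainability_config=shap_config,
    bias_config=bias_config,
    model_predicted_label_config=predictions_config,  # richer than a bare index or JMESPath
)
```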