|
60 | 60 | HYPERPARAMETER_TUNING_JOB_NAME = "HyperParameterTuningJobName"
|
61 | 61 | PARENT_HYPERPARAMETER_TUNING_JOBS = "ParentHyperParameterTuningJobs"
|
62 | 62 | WARM_START_TYPE = "WarmStartType"
|
| 63 | +GRID_SEARCH = "GridSearch" |
63 | 64 |
|
64 | 65 | logger = logging.getLogger(__name__)
|
65 | 66 |
|
@@ -219,7 +220,7 @@ def __init__(
|
219 | 220 | metric_definitions: Optional[List[Dict[str, Union[str, PipelineVariable]]]] = None,
|
220 | 221 | strategy: Union[str, PipelineVariable] = "Bayesian",
|
221 | 222 | objective_type: Union[str, PipelineVariable] = "Maximize",
|
222 |
| - max_jobs: Union[int, PipelineVariable] = 1, |
| 223 | + max_jobs: Union[int, PipelineVariable] = None, |
223 | 224 | max_parallel_jobs: Union[int, PipelineVariable] = 1,
|
224 | 225 | tags: Optional[List[Dict[str, Union[str, PipelineVariable]]]] = None,
|
225 | 226 | base_tuning_job_name: Optional[str] = None,
|
@@ -258,7 +259,8 @@ def __init__(
|
258 | 259 | evaluating training jobs. This value can be either 'Minimize' or
|
259 | 260 | 'Maximize' (default: 'Maximize').
|
260 | 261 | max_jobs (int or PipelineVariable): Maximum total number of training jobs to start for
|
261 |
| - the hyperparameter tuning job (default: 1). |
| 262 | + the hyperparameter tuning job. The default value is unspecified for the GridSearch strategy |
| 263 | + and 1 for all other strategies (default: None). |
262 | 264 | max_parallel_jobs (int or PipelineVariable): Maximum number of parallel training jobs to
|
263 | 265 | start (default: 1).
|
264 | 266 | tags (list[dict[str, str] or list[dict[str, PipelineVariable]]): List of tags for
|
@@ -311,7 +313,11 @@ def __init__(
|
311 | 313 |
|
312 | 314 | self.strategy = strategy
|
313 | 315 | self.objective_type = objective_type
|
| 316 | + # For the GridSearch strategy, max_jobs is expected to be None and is computed later. |
| 317 | + # For all other strategies, keep the previous default of 1 for backward compatibility. |
314 | 318 | self.max_jobs = max_jobs
|
| 319 | + if max_jobs is None and strategy != GRID_SEARCH: |
| 320 | + self.max_jobs = 1 |
315 | 321 | self.max_parallel_jobs = max_parallel_jobs
|
316 | 322 |
|
317 | 323 | self.tags = tags
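
Taken together, the change above makes `max_jobs` default to `None` only for the GridSearch strategy, while every other strategy keeps the previous default of 1. A minimal sketch of that defaulting logic, with `resolve_max_jobs` as a hypothetical helper name used purely for illustration:

```python
GRID_SEARCH = "GridSearch"

def resolve_max_jobs(strategy, max_jobs=None):
    # GridSearch leaves max_jobs unset (None) so it can be computed later;
    # every other strategy falls back to the previous default of 1
    # for backward compatibility.
    if max_jobs is None and strategy != GRID_SEARCH:
        return 1
    return max_jobs

assert resolve_max_jobs("Bayesian") == 1              # previous default preserved
assert resolve_max_jobs("GridSearch") is None         # left unset for GridSearch
assert resolve_max_jobs("Random", max_jobs=10) == 10  # explicit value always wins
```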
|
@@ -1301,7 +1307,7 @@ def create(
|
1301 | 1307 | base_tuning_job_name=None,
|
1302 | 1308 | strategy="Bayesian",
|
1303 | 1309 | objective_type="Maximize",
|
1304 |
| - max_jobs=1, |
| 1310 | + max_jobs=None, |
1305 | 1311 | max_parallel_jobs=1,
|
1306 | 1312 | tags=None,
|
1307 | 1313 | warm_start_config=None,
|
@@ -1351,7 +1357,8 @@ def create(
|
1351 | 1357 | objective_type (str): The type of the objective metric for evaluating training jobs.
|
1352 | 1358 | This value can be either 'Minimize' or 'Maximize' (default: 'Maximize').
|
1353 | 1359 | max_jobs (int): Maximum total number of training jobs to start for the hyperparameter
|
1354 |
| - tuning job (default: 1). |
| 1360 | + tuning job. The default value is unspecified for the GridSearch strategy |
| 1361 | + and 1 for all other strategies (default: None). |
1355 | 1362 | max_parallel_jobs (int): Maximum number of parallel training jobs to start
|
1356 | 1363 | (default: 1).
|
1357 | 1364 | tags (list[dict]): List of tags for labeling the tuning job (default: None). For more,
|
|