
Add data_type to hyperparameters #54


Merged · 5 commits · Jan 24, 2018
4 changes: 2 additions & 2 deletions src/sagemaker/amazon/amazon_estimator.py
@@ -28,8 +28,8 @@ class AmazonAlgorithmEstimatorBase(EstimatorBase):
     """Base class for Amazon first-party Estimator implementations. This class isn't intended
     to be instantiated directly."""

-    feature_dim = hp('feature_dim', (validation.isint, validation.gt(0)))
-    mini_batch_size = hp('mini_batch_size', (validation.isint, validation.gt(0)))
+    feature_dim = hp('feature_dim', validation.gt(0), data_type=int)
+    mini_batch_size = hp('mini_batch_size', validation.gt(0), data_type=int)

     def __init__(self, role, train_instance_count, train_instance_type, data_location=None, **kwargs):
         """Initialize an AmazonAlgorithmEstimatorBase.
50 changes: 25 additions & 25 deletions src/sagemaker/amazon/factorization_machines.py
@@ -13,7 +13,7 @@
 from sagemaker.amazon.amazon_estimator import AmazonAlgorithmEstimatorBase, registry
 from sagemaker.amazon.common import numpy_to_record_serializer, record_deserializer
 from sagemaker.amazon.hyperparameter import Hyperparameter as hp  # noqa
-from sagemaker.amazon.validation import gt, isin, isint, ge, isnumber
+from sagemaker.amazon.validation import gt, isin, ge
 from sagemaker.predictor import RealTimePredictor
 from sagemaker.model import Model
 from sagemaker.session import Session
@@ -23,34 +23,34 @@ class FactorizationMachines(AmazonAlgorithmEstimatorBase):

     repo = 'factorization-machines:1'

-    num_factors = hp('num_factors', (gt(0), isint), 'An integer greater than zero')
+    num_factors = hp('num_factors', gt(0), 'An integer greater than zero', int)
     predictor_type = hp('predictor_type', isin('binary_classifier', 'regressor'),
-                        'Value "binary_classifier" or "regressor"')
-    epochs = hp('epochs', (gt(0), isint), "An integer greater than 0")
-    clip_gradient = hp('clip_gradient', isnumber, "A float value")
-    eps = hp('eps', isnumber, "A float value")
-    rescale_grad = hp('rescale_grad', isnumber, "A float value")
-    bias_lr = hp('bias_lr', (ge(0), isnumber), "A non-negative float")
-    linear_lr = hp('linear_lr', (ge(0), isnumber), "A non-negative float")
-    factors_lr = hp('factors_lr', (ge(0), isnumber), "A non-negative float")
-    bias_wd = hp('bias_wd', (ge(0), isnumber), "A non-negative float")
-    linear_wd = hp('linear_wd', (ge(0), isnumber), "A non-negative float")
-    factors_wd = hp('factors_wd', (ge(0), isnumber), "A non-negative float")
+                        'Value "binary_classifier" or "regressor"', str)
+    epochs = hp('epochs', gt(0), "An integer greater than 0", int)
+    clip_gradient = hp('clip_gradient', (), "A float value", float)
+    eps = hp('eps', (), "A float value", float)
+    rescale_grad = hp('rescale_grad', (), "A float value", float)
+    bias_lr = hp('bias_lr', ge(0), "A non-negative float", float)
+    linear_lr = hp('linear_lr', ge(0), "A non-negative float", float)
+    factors_lr = hp('factors_lr', ge(0), "A non-negative float", float)
+    bias_wd = hp('bias_wd', ge(0), "A non-negative float", float)
+    linear_wd = hp('linear_wd', ge(0), "A non-negative float", float)
+    factors_wd = hp('factors_wd', ge(0), "A non-negative float", float)
     bias_init_method = hp('bias_init_method', isin('normal', 'uniform', 'constant'),
-                          'Value "normal", "uniform" or "constant"')
-    bias_init_scale = hp('bias_init_scale', (ge(0), isnumber), "A non-negative float")
-    bias_init_sigma = hp('bias_init_sigma', (ge(0), isnumber), "A non-negative float")
-    bias_init_value = hp('bias_init_value', isnumber, "A float value")
+                          'Value "normal", "uniform" or "constant"', str)
+    bias_init_scale = hp('bias_init_scale', ge(0), "A non-negative float", float)
+    bias_init_sigma = hp('bias_init_sigma', ge(0), "A non-negative float", float)
+    bias_init_value = hp('bias_init_value', (), "A float value", float)
     linear_init_method = hp('linear_init_method', isin('normal', 'uniform', 'constant'),
-                            'Value "normal", "uniform" or "constant"')
-    linear_init_scale = hp('linear_init_scale', (ge(0), isnumber), "A non-negative float")
-    linear_init_sigma = hp('linear_init_sigma', (ge(0), isnumber), "A non-negative float")
-    linear_init_value = hp('linear_init_value', isnumber, "A float value")
+                            'Value "normal", "uniform" or "constant"', str)
+    linear_init_scale = hp('linear_init_scale', ge(0), "A non-negative float", float)
+    linear_init_sigma = hp('linear_init_sigma', ge(0), "A non-negative float", float)
+    linear_init_value = hp('linear_init_value', (), "A float value", float)
     factors_init_method = hp('factors_init_method', isin('normal', 'uniform', 'constant'),
-                             'Value "normal", "uniform" or "constant"')
-    factors_init_scale = hp('factors_init_scale', (ge(0), isnumber), "A non-negative float")
-    factors_init_sigma = hp('factors_init_sigma', (ge(0), isnumber), "A non-negative float")
-    factors_init_value = hp('factors_init_value', isnumber, "A float value")
+                             'Value "normal", "uniform" or "constant"', str)
+    factors_init_scale = hp('factors_init_scale', ge(0), "A non-negative float", float)
+    factors_init_sigma = hp('factors_init_sigma', ge(0), "A non-negative float", float)
+    factors_init_value = hp('factors_init_value', (), "A float value", float)

     def __init__(self, role, train_instance_count, train_instance_type,
                  num_factors, predictor_type,
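Note the empty tuple passed as the validator for hyperparameters such as clip_gradient, eps and rescale_grad above: the descriptor iterates over whatever validators it receives, so () registers zero checks and leaves the float cast from data_type as the only constraint. A tiny standalone illustration of the idiom (not part of the diff):

    validators = ()                   # no validation functions registered
    value = float("0.5")              # the data_type cast still runs
    assert all(v(value) for v in validators)  # vacuously true: nothing left to check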
7 changes: 5 additions & 2 deletions src/sagemaker/amazon/hyperparameter.py
@@ -16,7 +16,7 @@ class Hyperparameter(object):
     """An algorithm hyperparameter with optional validation. Implemented as a python
     descriptor object."""

-    def __init__(self, name, validate=lambda _: True, validation_message=""):
+    def __init__(self, name, validate=lambda _: True, validation_message="", data_type=str):
         """Args:
             name (str): The name of this hyperparameter
             validate (callable[object]->[bool]): A validation function or list of validation functions.
@@ -27,6 +27,7 @@ def __init__(self, name, validate=lambda _: True, validation_message=""):
         self.validation = validate
         self.validation_message = validation_message
         self.name = name
+        self.data_type = data_type
         try:
             iter(self.validation)
         except TypeError:
@@ -35,9 +36,10 @@ def __init__(self, name, validate=lambda _: True, validation_message=""):
     def validate(self, value):
         if value is None:  # We allow assignment from None, but Nones are not sent to training.
             return
+
         for valid in self.validation:
             if not valid(value):
-                error_message = "Invalid hyperparameter value {}".format(value)
+                error_message = "Invalid hyperparameter value {} for {}".format(value, self.name)
                 if self.validation_message:
                     error_message = error_message + ". Expecting: " + self.validation_message
                 raise ValueError(error_message)
@@ -50,6 +52,7 @@ def __get__(self, obj, objtype):

     def __set__(self, obj, value):
         """Validate the supplied value and set this hyperparameter to value"""
+        value = None if value is None else self.data_type(value)
         self.validate(value)
         if '_hyperparameters' not in dir(obj):
             obj._hyperparameters = dict()
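Taken together, the edits above change the descriptor contract: every assigned value is coerced to data_type first and only then validated, so hyperparameter values that come back from the SageMaker API as strings round-trip into typed Python values. A minimal self-contained sketch of the resulting behavior (condensed from this diff; the Example class and the trailing assertions are illustrative, not part of the PR):

    class Hyperparameter(object):
        """Sketch of the descriptor after this PR: cast to data_type, then validate."""

        def __init__(self, name, validate=lambda _: True, validation_message="", data_type=str):
            self.name = name
            # Accept a single callable or a tuple/list of them, as the real class does.
            self.validation = validate if isinstance(validate, (tuple, list)) else (validate,)
            self.validation_message = validation_message
            self.data_type = data_type

        def __get__(self, obj, objtype):
            return getattr(obj, '_hyperparameters', {}).get(self.name)

        def __set__(self, obj, value):
            value = None if value is None else self.data_type(value)  # coerce before validating
            if value is not None and not all(v(value) for v in self.validation):
                raise ValueError("Invalid hyperparameter value {} for {}".format(value, self.name))
            if '_hyperparameters' not in dir(obj):
                obj._hyperparameters = dict()
            obj._hyperparameters[self.name] = value


    class Example(object):
        epochs = Hyperparameter('epochs', lambda x: x > 0, 'An integer greater than 0', int)


    e = Example()
    e.epochs = "10"          # a string, as the SageMaker API returns...
    assert e.epochs == 10    # ...is cast to int before the range check runs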
20 changes: 10 additions & 10 deletions src/sagemaker/amazon/kmeans.py
@@ -13,7 +13,7 @@
 from sagemaker.amazon.amazon_estimator import AmazonAlgorithmEstimatorBase, registry
 from sagemaker.amazon.common import numpy_to_record_serializer, record_deserializer
 from sagemaker.amazon.hyperparameter import Hyperparameter as hp  # noqa
-from sagemaker.amazon.validation import gt, isin, isint, ge
+from sagemaker.amazon.validation import gt, isin, ge
 from sagemaker.predictor import RealTimePredictor
 from sagemaker.model import Model
 from sagemaker.session import Session
@@ -23,15 +23,15 @@ class KMeans(AmazonAlgorithmEstimatorBase):

     repo = 'kmeans:1'

-    k = hp('k', (gt(1), isint), 'An integer greater-than 1')
-    init_method = hp('init_method', isin('random', 'kmeans++'), 'One of "random", "kmeans++"')
-    max_iterations = hp('local_lloyd_max_iterations', (gt(0), isint), 'An integer greater-than 0')
-    tol = hp('local_lloyd_tol', (gt(0), isint), 'An integer greater-than 0')
-    num_trials = hp('local_lloyd_num_trials', (gt(0), isint), 'An integer greater-than 0')
-    local_init_method = hp('local_lloyd_init_method', isin('random', 'kmeans++'), 'One of "random", "kmeans++"')
-    half_life_time_size = hp('half_life_time_size', (ge(0), isint), 'An integer greater-than-or-equal-to 0')
-    epochs = hp('epochs', (gt(0), isint), 'An integer greater-than 0')
-    center_factor = hp('extra_center_factor', (gt(0), isint), 'An integer greater-than 0')
+    k = hp('k', gt(1), 'An integer greater-than 1', int)
+    init_method = hp('init_method', isin('random', 'kmeans++'), 'One of "random", "kmeans++"', str)
+    max_iterations = hp('local_lloyd_max_iterations', gt(0), 'An integer greater-than 0', int)
+    tol = hp('local_lloyd_tol', gt(0), 'An integer greater-than 0', int)
+    num_trials = hp('local_lloyd_num_trials', gt(0), 'An integer greater-than 0', int)
+    local_init_method = hp('local_lloyd_init_method', isin('random', 'kmeans++'), 'One of "random", "kmeans++"', str)
+    half_life_time_size = hp('half_life_time_size', ge(0), 'An integer greater-than-or-equal-to 0', int)
+    epochs = hp('epochs', gt(0), 'An integer greater-than 0', int)
+    center_factor = hp('extra_center_factor', gt(0), 'An integer greater-than 0', int)

     def __init__(self, role, train_instance_count, train_instance_type, k, init_method=None,
                  max_iterations=None, tol=None, num_trials=None, local_init_method=None,
67 changes: 34 additions & 33 deletions src/sagemaker/amazon/linear_learner.py
@@ -13,7 +13,7 @@
 from sagemaker.amazon.amazon_estimator import AmazonAlgorithmEstimatorBase, registry
 from sagemaker.amazon.common import numpy_to_record_serializer, record_deserializer
 from sagemaker.amazon.hyperparameter import Hyperparameter as hp  # noqa
-from sagemaker.amazon.validation import isin, gt, lt, isint, isbool, isnumber
+from sagemaker.amazon.validation import isin, gt, lt
 from sagemaker.predictor import RealTimePredictor
 from sagemaker.model import Model
 from sagemaker.session import Session
@@ -27,40 +27,41 @@ class LinearLearner(AmazonAlgorithmEstimatorBase):

     binary_classifier_model_selection_criteria = hp('binary_classifier_model_selection_criteria',
                                                     isin('accuracy', 'f1', 'precision_at_target_recall',
-                                                         'recall_at_target_precision', 'cross_entropy_loss'))
-    target_recall = hp('target_recall', (gt(0), lt(1)), "A float in (0,1)")
-    target_precision = hp('target_precision', (gt(0), lt(1)), "A float in (0,1)")
-    positive_example_weight_mult = hp('positive_example_weight_mult', gt(0), "A float greater than 0")
-    epochs = hp('epochs', (gt(0), isint), "An integer greater-than 0")
+                                                         'recall_at_target_precision', 'cross_entropy_loss'),
+                                                    data_type=str)
+    target_recall = hp('target_recall', (gt(0), lt(1)), "A float in (0,1)", float)
+    target_precision = hp('target_precision', (gt(0), lt(1)), "A float in (0,1)", float)
+    positive_example_weight_mult = hp('positive_example_weight_mult', gt(0), "A float greater than 0", float)
+    epochs = hp('epochs', gt(0), "An integer greater-than 0", int)
     predictor_type = hp('predictor_type', isin('binary_classifier', 'regressor'),
-                        'One of "binary_classifier" or "regressor"')
-    use_bias = hp('use_bias', isbool, "Either True or False")
-    num_models = hp('num_models', (gt(0), isint), "An integer greater-than 0")
-    num_calibration_samples = hp('num_calibration_samples', (gt(0), isint), "An integer greater-than 0")
-    init_method = hp('init_method', isin('uniform', 'normal'), 'One of "uniform" or "normal"')
-    init_scale = hp('init_scale', (gt(-1), lt(1)), 'A float in (-1, 1)')
-    init_sigma = hp('init_sigma', (gt(0), lt(1)), 'A float in (0, 1)')
-    init_bias = hp('init_bias', isnumber, 'A number')
-    optimizer = hp('optimizer', isin('sgd', 'adam', 'auto'), 'One of "sgd", "adam" or "auto"')
+                        'One of "binary_classifier" or "regressor"', str)
+    use_bias = hp('use_bias', (), "Either True or False", bool)
+    num_models = hp('num_models', gt(0), "An integer greater-than 0", int)
+    num_calibration_samples = hp('num_calibration_samples', gt(0), "An integer greater-than 0", int)
+    init_method = hp('init_method', isin('uniform', 'normal'), 'One of "uniform" or "normal"', str)
+    init_scale = hp('init_scale', (gt(-1), lt(1)), 'A float in (-1, 1)', float)
+    init_sigma = hp('init_sigma', (gt(0), lt(1)), 'A float in (0, 1)', float)
+    init_bias = hp('init_bias', (), 'A number', float)
+    optimizer = hp('optimizer', isin('sgd', 'adam', 'auto'), 'One of "sgd", "adam" or "auto"', str)
     loss = hp('loss', isin('logistic', 'squared_loss', 'absolute_loss', 'auto'),
-              '"logistic", "squared_loss", "absolute_loss" or "auto"')
-    wd = hp('wd', (gt(0), lt(1)), 'A float in (0,1)')
-    l1 = hp('l1', (gt(0), lt(1)), 'A float in (0,1)')
-    momentum = hp('momentum', (gt(0), lt(1)), 'A float in (0,1)')
-    learning_rate = hp('learning_rate', (gt(0), lt(1)), 'A float in (0,1)')
-    beta_1 = hp('beta_1', (gt(0), lt(1)), 'A float in (0,1)')
-    beta_2 = hp('beta_2', (gt(0), lt(1)), 'A float in (0,1)')
-    bias_lr_mult = hp('bias_lr_mult', gt(0), 'A float greater-than 0')
-    bias_wd_mult = hp('bias_wd_mult', gt(0), 'A float greater-than 0')
-    use_lr_scheduler = hp('use_lr_scheduler', isbool, 'A boolean')
-    lr_scheduler_step = hp('lr_scheduler_step', (gt(0), isint), 'An integer greater-than 0')
-    lr_scheduler_factor = hp('lr_scheduler_factor', (gt(0), lt(1)), 'A float in (0,1)')
-    lr_scheduler_minimum_lr = hp('lr_scheduler_minimum_lr', gt(0), 'A float greater-than 0')
-    normalize_data = hp('normalize_data', isbool, 'A boolean')
-    normalize_label = hp('normalize_label', isbool, 'A boolean')
-    unbias_data = hp('unbias_data', isbool, 'A boolean')
-    unbias_label = hp('unbias_label', isbool, 'A boolean')
-    num_point_for_scalar = hp('num_point_for_scalar', (isint, gt(0)), 'An integer greater-than 0')
+              '"logistic", "squared_loss", "absolute_loss" or "auto"', str)
+    wd = hp('wd', (gt(0), lt(1)), 'A float in (0,1)', float)
+    l1 = hp('l1', (gt(0), lt(1)), 'A float in (0,1)', float)
+    momentum = hp('momentum', (gt(0), lt(1)), 'A float in (0,1)', float)
+    learning_rate = hp('learning_rate', (gt(0), lt(1)), 'A float in (0,1)', float)
+    beta_1 = hp('beta_1', (gt(0), lt(1)), 'A float in (0,1)', float)
+    beta_2 = hp('beta_2', (gt(0), lt(1)), 'A float in (0,1)', float)
+    bias_lr_mult = hp('bias_lr_mult', gt(0), 'A float greater-than 0', float)
+    bias_wd_mult = hp('bias_wd_mult', gt(0), 'A float greater-than 0', float)
+    use_lr_scheduler = hp('use_lr_scheduler', (), 'A boolean', bool)
+    lr_scheduler_step = hp('lr_scheduler_step', gt(0), 'An integer greater-than 0', int)
+    lr_scheduler_factor = hp('lr_scheduler_factor', (gt(0), lt(1)), 'A float in (0,1)', float)
+    lr_scheduler_minimum_lr = hp('lr_scheduler_minimum_lr', gt(0), 'A float greater-than 0', float)
+    normalize_data = hp('normalize_data', (), 'A boolean', bool)
+    normalize_label = hp('normalize_label', (), 'A boolean', bool)
+    unbias_data = hp('unbias_data', (), 'A boolean', bool)
+    unbias_label = hp('unbias_label', (), 'A boolean', bool)
+    num_point_for_scalar = hp('num_point_for_scalar', gt(0), 'An integer greater-than 0', int)

     def __init__(self, role, train_instance_count, train_instance_type, predictor_type='binary_classifier',
                  binary_classifier_model_selection_criteria=None, target_recall=None, target_precision=None,
13 changes: 6 additions & 7 deletions src/sagemaker/amazon/pca.py
@@ -24,14 +24,13 @@ class PCA(AmazonAlgorithmEstimatorBase):

     DEFAULT_MINI_BATCH_SIZE = 500

-    num_components = hp(name='num_components', validate=lambda x: x > 0 and isinstance(x, int),
-                        validation_message='Value must be an integer greater than zero')
+    num_components = hp(name='num_components', validate=lambda x: x > 0,
+                        validation_message='Value must be an integer greater than zero', data_type=int)
     algorithm_mode = hp(name='algorithm_mode', validate=lambda x: x in ['regular', 'stable', 'randomized'],
-                        validation_message='Value must be one of "regular", "stable", "randomized"')
-    subtract_mean = hp(name='subtract_mean', validate=lambda x: isinstance(x, bool),
-                       validation_message='Value must be a boolean')
-    extra_components = hp(name='extra_components', validate=lambda x: x >= 0 and isinstance(x, int),
-                          validation_message="Value must be an integer greater than or equal to 0")
+                        validation_message='Value must be one of "regular", "stable", "randomized"', data_type=str)
+    subtract_mean = hp(name='subtract_mean', validation_message='Value must be a boolean', data_type=bool)
+    extra_components = hp(name='extra_components', validate=lambda x: x >= 0,
+                          validation_message="Value must be an integer greater than or equal to 0", data_type=int)

     def __init__(self, role, train_instance_count, train_instance_type, num_components,
                  algorithm_mode=None, subtract_mean=None, extra_components=None, **kwargs):
6 changes: 0 additions & 6 deletions src/sagemaker/amazon/validation.py
@@ -10,7 +10,6 @@
 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
 # ANY KIND, either express or implied. See the License for the specific
 # language governing permissions and limitations under the License.
-import numbers


 def gt(minimum):
@@ -41,8 +40,3 @@ def istype(expected):
     def validate(value):
         return isinstance(value, expected)
     return validate
-
-
-isint = istype(int)
-isbool = istype(bool)
-isnumber = istype(numbers.Number)  # noqa
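With the type-checking helpers gone, validation.py keeps only its closure-based factories (gt, ge, lt, isin, istype), and type enforcement lives entirely in the descriptor's data_type cast. A quick sketch of the factory pattern those imports rely on (bodies paraphrased from the calls visible in this diff):

    def gt(minimum):
        def validate(value):
            return value > minimum
        return validate


    def isin(*expected):
        def validate(value):
            return value in expected
        return validate


    # Each factory returns a predicate. The removed isint/isbool/isnumber
    # predicates are redundant once values are cast to data_type up front.
    assert gt(0)(5) and not gt(0)(-1)
    assert isin('sgd', 'adam', 'auto')('adam')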
21 changes: 19 additions & 2 deletions tests/unit/test_hyperparameter.py
@@ -16,9 +16,9 @@

 class Test(object):

-    blank = Hyperparameter(name="some-name")
+    blank = Hyperparameter(name="some-name", data_type=int)
     elizabeth = Hyperparameter(name='elizabeth')
-    validated = Hyperparameter(name="validated", validate=lambda value: value > 55)
+    validated = Hyperparameter(name="validated", validate=lambda value: value > 55, data_type=int)


 def test_blank_access():
@@ -55,3 +55,20 @@ def test_validated():
     x.validated = 66
     with pytest.raises(ValueError):
         x.validated = 23
+
+
+def test_data_type():
+    x = Test()
+    x.validated = 66
+    assert type(x.validated) == Test.__dict__["validated"].data_type
+
+
+def test_from_string():
+    x = Test()
+    value = 65
+
+    x.validated = value
+    from_api = str(value)
+
+    x.validated = from_api
+    assert x.validated == value
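The two new tests pin down the cast-then-validate path end to end. As a usage note, the same behavior is what lets string-typed values from API responses satisfy numeric validators (a hedged illustration reusing the Test class defined in this file):

    x = Test()
    x.validated = "66"                    # string in, as an API response might supply
    assert x.validated == 66              # cast to int by data_type before the > 55 check
    assert isinstance(x.validated, int)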