 from sagemaker.amazon.amazon_estimator import AmazonAlgorithmEstimatorBase, registry
 from sagemaker.amazon.common import numpy_to_record_serializer, record_deserializer
 from sagemaker.amazon.hyperparameter import Hyperparameter as hp  # noqa
-from sagemaker.amazon.validation import gt, isin, isint, ge, isnumber
+from sagemaker.amazon.validation import gt, isin, ge
 from sagemaker.predictor import RealTimePredictor
 from sagemaker.model import Model
 from sagemaker.session import Session
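The three validators kept by the trimmed import are all the hyperparameters below need: gt/ge express strict and non-strict lower bounds, and isin expresses a fixed set of allowed strings, replacing the earlier tuple-of-validators form such as (gt(0), isint). A minimal sketch of such predicate factories, assuming each returns a simple True/False callable (this is illustrative only, not the actual sagemaker.amazon.validation source):

# Hypothetical stand-ins for gt, ge, and isin, reconstructed from how they
# are called in this diff (e.g. gt(0), ge(0), isin('normal', 'uniform', 'constant')).
def gt(minimum):
    # Predicate: value must be strictly greater than `minimum`.
    return lambda value: value > minimum

def ge(minimum):
    # Predicate: value must be greater than or equal to `minimum`.
    return lambda value: value >= minimum

def isin(*expected):
    # Predicate: value must be one of the allowed literals.
    return lambda value: value in expected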
@@ -23,34 +23,34 @@ class FactorizationMachines(AmazonAlgorithmEstimatorBase):
 
     repo = 'factorization-machines:1'
 
-    num_factors = hp('num_factors', (gt(0), isint), 'An integer greater than zero', int)
+    num_factors = hp('num_factors', gt(0), 'An integer greater than zero', int)
     predictor_type = hp('predictor_type', isin('binary_classifier', 'regressor'),
                         'Value "binary_classifier" or "regressor"', str)
-    epochs = hp('epochs', (gt(0), isint), "An integer greater than 0", int)
-    clip_gradient = hp('clip_gradient', isnumber, "A float value", float)
-    eps = hp('eps', isnumber, "A float value", float)
-    rescale_grad = hp('rescale_grad', isnumber, "A float value", float)
-    bias_lr = hp('bias_lr', (ge(0), isnumber), "A non-negative float", float)
-    linear_lr = hp('linear_lr', (ge(0), isnumber), "A non-negative float", float)
-    factors_lr = hp('factors_lr', (ge(0), isnumber), "A non-negative float", float)
-    bias_wd = hp('bias_wd', (ge(0), isnumber), "A non-negative float", float)
-    linear_wd = hp('linear_wd', (ge(0), isnumber), "A non-negative float", float)
-    factors_wd = hp('factors_wd', (ge(0), isnumber), "A non-negative float", float)
+    epochs = hp('epochs', gt(0), "An integer greater than 0", int)
+    clip_gradient = hp('clip_gradient', (), "A float value", float)
+    eps = hp('eps', (), "A float value", float)
+    rescale_grad = hp('rescale_grad', (), "A float value", float)
+    bias_lr = hp('bias_lr', ge(0), "A non-negative float", float)
+    linear_lr = hp('linear_lr', ge(0), "A non-negative float", float)
+    factors_lr = hp('factors_lr', ge(0), "A non-negative float", float)
+    bias_wd = hp('bias_wd', ge(0), "A non-negative float", float)
+    linear_wd = hp('linear_wd', ge(0), "A non-negative float", float)
+    factors_wd = hp('factors_wd', ge(0), "A non-negative float", float)
     bias_init_method = hp('bias_init_method', isin('normal', 'uniform', 'constant'),
                           'Value "normal", "uniform" or "constant"', str)
-    bias_init_scale = hp('bias_init_scale', (ge(0), isnumber), "A non-negative float", float)
-    bias_init_sigma = hp('bias_init_sigma', (ge(0), isnumber), "A non-negative float", float)
-    bias_init_value = hp('bias_init_value', isnumber, "A float value", float)
+    bias_init_scale = hp('bias_init_scale', ge(0), "A non-negative float", float)
+    bias_init_sigma = hp('bias_init_sigma', ge(0), "A non-negative float", float)
+    bias_init_value = hp('bias_init_value', (), "A float value", float)
     linear_init_method = hp('linear_init_method', isin('normal', 'uniform', 'constant'),
                             'Value "normal", "uniform" or "constant"', str)
-    linear_init_scale = hp('linear_init_scale', (ge(0), isnumber), "A non-negative float", float)
-    linear_init_sigma = hp('linear_init_sigma', (ge(0), isnumber), "A non-negative float", float)
-    linear_init_value = hp('linear_init_value', isnumber, "A float value", float)
+    linear_init_scale = hp('linear_init_scale', ge(0), "A non-negative float", float)
+    linear_init_sigma = hp('linear_init_sigma', ge(0), "A non-negative float", float)
+    linear_init_value = hp('linear_init_value', (), "A float value", float)
     factors_init_method = hp('factors_init_method', isin('normal', 'uniform', 'constant'),
                              'Value "normal", "uniform" or "constant"', str)
-    factors_init_scale = hp('factors_init_scale', (ge(0), isnumber), "A non-negative float", float)
-    factors_init_sigma = hp('factors_init_sigma', (ge(0), isnumber), "A non-negative float", float)
-    factors_init_value = hp('factors_init_value', isnumber, "A float value", float)
+    factors_init_scale = hp('factors_init_scale', ge(0), "A non-negative float", float)
+    factors_init_sigma = hp('factors_init_sigma', ge(0), "A non-negative float", float)
+    factors_init_value = hp('factors_init_value', (), "A float value", float)
 
     def __init__(self, role, train_instance_count, train_instance_type,
                  num_factors, predictor_type,