Commit 81531d4

Fixes in LinearLearner and unit tests addition. (#77)
1 parent 284c712 commit 81531d4

3 files changed: +565 −12 lines

src/sagemaker/amazon/linear_learner.py (+6 −6)
@@ -51,7 +51,7 @@ class LinearLearner(AmazonAlgorithmEstimatorBase):
     momentum = hp('momentum', (gt(0), lt(1)), 'A float in (0,1)', float)
     learning_rate = hp('learning_rate', (gt(0), lt(1)), 'A float in (0,1)', float)
     beta_1 = hp('beta_1', (gt(0), lt(1)), 'A float in (0,1)', float)
-    beta_2 = hp('beta_1', (gt(0), lt(1)), 'A float in (0,1)', float)
+    beta_2 = hp('beta_2', (gt(0), lt(1)), 'A float in (0,1)', float)
     bias_lr_mult = hp('bias_lr_mult', gt(0), 'A float greater-than 0', float)
     bias_wd_mult = hp('bias_wd_mult', gt(0), 'A float greater-than 0', float)
     use_lr_scheduler = hp('use_lr_scheduler', (), 'A boolean', bool)
@@ -62,7 +62,7 @@ class LinearLearner(AmazonAlgorithmEstimatorBase):
     normalize_label = hp('normalize_label', (), 'A boolean', bool)
     unbias_data = hp('unbias_data', (), 'A boolean', bool)
     unbias_label = hp('unbias_label', (), 'A boolean', bool)
-    num_point_for_scalar = hp('num_point_for_scalar', gt(0), 'An integer greater-than 0', int)
+    num_point_for_scaler = hp('num_point_for_scaler', gt(0), 'An integer greater-than 0', int)

     def __init__(self, role, train_instance_count, train_instance_type, predictor_type='binary_classifier',
                  binary_classifier_model_selection_criteria=None, target_recall=None, target_precision=None,
@@ -71,7 +71,7 @@ def __init__(self, role, train_instance_count, train_instance_type, predictor_ty
                  optimizer=None, loss=None, wd=None, l1=None, momentum=None, learning_rate=None, beta_1=None,
                  beta_2=None, bias_lr_mult=None, bias_wd_mult=None, use_lr_scheduler=None, lr_scheduler_step=None,
                  lr_scheduler_factor=None, lr_scheduler_minimum_lr=None, normalize_data=None,
-                 normalize_label=None, unbias_data=None, unbias_label=None, num_point_for_scalar=None, **kwargs):
+                 normalize_label=None, unbias_data=None, unbias_label=None, num_point_for_scaler=None, **kwargs):
         """An :class:`Estimator` for binary classification and regression.

         Amazon SageMaker Linear Learner provides a solution for both classification and regression problems, allowing
@@ -186,14 +186,14 @@ def __init__(self, role, train_instance_count, train_instance_type, predictor_ty
         self.normalize_data = normalize_data
         self.normalize_label = normalize_label
         self.unbias_data = unbias_data
-        self.ubias_label = unbias_label
-        self.num_point_for_scaler = num_point_for_scalar
+        self.unbias_label = unbias_label
+        self.num_point_for_scaler = num_point_for_scaler

     def create_model(self):
         """Return a :class:`~sagemaker.amazon.kmeans.LinearLearnerModel` referencing the latest
         s3 model data produced by this Estimator."""

-        return LinearLearnerModel(self, self.model_data, self.role, self.sagemaker_session)
+        return LinearLearnerModel(self.model_data, self.role, self.sagemaker_session)

     def fit(self, records, mini_batch_size=None, **kwargs):
         # mini_batch_size can't be greater than number of records or training job fails
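
For reference, a minimal sketch of what the corrected hyperparameter names mean on the user side (illustrative only; it assumes an already configured sagemaker_session object and reuses the 'SageMakerRole' role name and instance type from the integration tests):

from sagemaker.amazon.linear_learner import LinearLearner

# Constructor arguments mirror the integration tests in this commit.
ll = LinearLearner('SageMakerRole', 1, 'ml.c4.2xlarge',
                   predictor_type='binary_classifier',
                   sagemaker_session=sagemaker_session)

# Before this fix, assigning beta_2 registered the value under the hyperparameter
# name 'beta_1', and the num_point_for_scaler constructor argument was emitted
# under the misspelled name 'num_point_for_scalar'.
ll.beta_2 = 0.999                # (gt(0), lt(1)) validator: must lie in (0, 1)
ll.num_point_for_scaler = 10000  # gt(0) validator: must be a positive integer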

tests/integ/test_linear_learner.py (+5 −6)
@@ -44,7 +44,7 @@ def test_linear_learner():
     ll = LinearLearner('SageMakerRole', 1, 'ml.c4.2xlarge', base_job_name='test-linear-learner',
                        sagemaker_session=sagemaker_session)
     ll.binary_classifier_model_selection_criteria = 'accuracy'
-    ll.target_reacall = 0.5
+    ll.target_recall = 0.5
     ll.target_precision = 0.5
     ll.positive_example_weight_mult = 0.1
     ll.epochs = 1
@@ -72,14 +72,13 @@ def test_linear_learner():
     ll.normalize_label = False
     ll.unbias_data = True
     ll.unbias_label = False
-    ll.num_point_for_scala = 10000
+    ll.num_point_for_scaler = 10000
     ll.fit(ll.record_set(train_set[0][:200], train_set[1][:200]))

     endpoint_name = name_from_base('linear-learner')
     with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session, minutes=20):

-        model = LinearLearnerModel(ll.model_data, role='SageMakerRole', sagemaker_session=sagemaker_session)
-        predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)
+        predictor = ll.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)

         result = predictor.predict(train_set[0][0:100])
         assert len(result) == 100
@@ -110,7 +109,7 @@ def test_async_linear_learner():
     ll = LinearLearner('SageMakerRole', 1, 'ml.c4.2xlarge', base_job_name='test-linear-learner',
                        sagemaker_session=sagemaker_session)
     ll.binary_classifier_model_selection_criteria = 'accuracy'
-    ll.target_reacall = 0.5
+    ll.target_recall = 0.5
     ll.target_precision = 0.5
     ll.positive_example_weight_mult = 0.1
     ll.epochs = 1
@@ -138,7 +137,7 @@ def test_async_linear_learner():
     ll.normalize_label = False
     ll.unbias_data = True
     ll.unbias_label = False
-    ll.num_point_for_scala = 10000
+    ll.num_point_for_scaler = 10000
     ll.fit(ll.record_set(train_set[0][:200], train_set[1][:200]), wait=False)
     training_job_name = ll.latest_training_job.name
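
A short sketch of the deployment path touched by the create_model() change (illustrative only; ll, endpoint_name, and train_set are assumed to exist as in the test above, inside the same timeout/cleanup context):

# After ll.fit(...) completes, the estimator can be deployed directly (as the
# updated integration test does), or via an explicitly created model object.
# create_model() now forwards the correct positional arguments
# (model_data, role, sagemaker_session) instead of also passing the estimator.
model = ll.create_model()
predictor = model.deploy(1, 'ml.c4.xlarge', endpoint_name=endpoint_name)

result = predictor.predict(train_set[0][0:100])
assert len(result) == 100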
