Add wrapper for FactorizationMachines algorithm. #38
@@ -0,0 +1,29 @@
=========
CHANGELOG
=========

1.0.2
=====

* feature: Estimators: add support for Amazon FactorizationMachines algorithm
* feature: Tests: add training failure tests for TF and MXNet
* feature: Documentation: show how to make predictions against existing endpoint
* feature: Estimators: implement write_spmatrix_to_sparse_tensor to support any scipy.sparse matrix

1.0.1
=====

* api-change: Model: Remove support for 'supplemental_containers' when creating Model
* feature: Documentation: multiple updates
* feature: Tests: ignore tests data in tox.ini, increase timeout for endpoint creation, capture exceptions during endpoint deletion, tests for input-output functions
* feature: Logging: change to describe job every 30s when showing logs
* feature: Session: use custom user agent at all times
* feature: Setup: add travis file

1.0.0
=====

* Initial commit
@@ -0,0 +1,202 @@
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from sagemaker.amazon.amazon_estimator import AmazonAlgorithmEstimatorBase, registry
from sagemaker.amazon.common import numpy_to_record_serializer, record_deserializer
from sagemaker.amazon.hyperparameter import Hyperparameter as hp  # noqa
from sagemaker.amazon.validation import gt, isin, isint, ge, isfloat
from sagemaker.predictor import RealTimePredictor
from sagemaker.model import Model
from sagemaker.session import Session


class FactorizationMachines(AmazonAlgorithmEstimatorBase):

    repo = 'factorization-machines:1'

    num_factors = hp('num_factors', (gt(0), isint), 'An integer greater than zero')
    predictor_type = hp('predictor_type', isin('binary_classifier', 'regressor'),
                        'Value "binary_classifier" or "regressor"')
    epochs = hp('epochs', (gt(0), isint), "An integer greater than 0")
    clip_gradient = hp('clip_gradient', isfloat, "A float value")
Review comment: This forces an explicit assignment, where an implicit assignment would be fine. If I have a FactorizationMachines object fm, then an assignment like fm.clip_gradient = 55 will fail, because 55 is not a float - even though it can be represented as a float. Consider using the ...

Reply: That's a good idea - will change.
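A minimal sketch of the failure mode the reviewer describes - hypothetical, not code from this PR; it assumes the isfloat validator is a strict isinstance check and that a coercing data type (e.g. Python's float constructor) is the kind of alternative being suggested:

# Hypothetical illustration, not code from this PR or the SDK.

def isfloat(value):
    # Assumed behavior of the strict validator: only true float instances pass.
    return isinstance(value, float)

print(isfloat(55.0))   # True  - an explicit float passes
print(isfloat(55))     # False - an int fails, though representable as a float
print(float(55))       # 55.0  - a coercing data type would accept it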
    eps = hp('eps', isfloat, "A float value")
    rescale_grad = hp('rescale_grad', isfloat, "A float value")
    bias_lr = hp('bias_lr', (ge(0), isfloat), "A non-negative float")
    linear_lr = hp('linear_lr', (ge(0), isfloat), "A non-negative float")
    factors_lr = hp('factors_lr', (ge(0), isfloat), "A non-negative float")
    bias_wd = hp('bias_wd', (ge(0), isfloat), "A non-negative float")
    linear_wd = hp('linear_wd', (ge(0), isfloat), "A non-negative float")
    factors_wd = hp('factors_wd', (ge(0), isfloat), "A non-negative float")
    bias_init_method = hp('bias_init_method', isin('normal', 'uniform', 'constant'),
                          'Value "normal", "uniform" or "constant"')
    bias_init_scale = hp('bias_init_scale', (ge(0), isfloat), "A non-negative float")
    bias_init_sigma = hp('bias_init_sigma', (ge(0), isfloat), "A non-negative float")
    bias_init_value = hp('bias_init_value', isfloat, "A float value")
    linear_init_method = hp('linear_init_method', isin('normal', 'uniform', 'constant'),
                            'Value "normal", "uniform" or "constant"')
    linear_init_scale = hp('linear_init_scale', (ge(0), isfloat), "A non-negative float")
    linear_init_sigma = hp('linear_init_sigma', (ge(0), isfloat), "A non-negative float")
    linear_init_value = hp('linear_init_value', isfloat, "A float value")
    factors_init_method = hp('factors_init_method', isin('normal', 'uniform', 'constant'),
                             'Value "normal", "uniform" or "constant"')
    factors_init_scale = hp('factors_init_scale', (ge(0), isfloat), "A non-negative float")
    factors_init_sigma = hp('factors_init_sigma', (ge(0), isfloat), "A non-negative float")
    factors_init_value = hp('factors_init_value', isfloat, "A float value")
    def __init__(self, role, train_instance_count, train_instance_type,
                 num_factors, predictor_type,
                 epochs=None, clip_gradient=None, eps=None, rescale_grad=None,
                 bias_lr=None, linear_lr=None, factors_lr=None,
                 bias_wd=None, linear_wd=None, factors_wd=None,
                 bias_init_method=None, bias_init_scale=None, bias_init_sigma=None, bias_init_value=None,
                 linear_init_method=None, linear_init_scale=None, linear_init_sigma=None, linear_init_value=None,
                 factors_init_method=None, factors_init_scale=None, factors_init_sigma=None, factors_init_value=None,
                 **kwargs):
"""Factorization Machines is :class:`Estimator` for general-purpose supervised learning. | ||
|
||
Amazon SageMaker Factorization Machines is a general-purpose supervised learning algorithm that you can use | ||
for both classification and regression tasks. It is an extension of a linear model that is designed | ||
to parsimoniously capture interactions between features within high dimensional sparse datasets. | ||
|
||
This Estimator may be fit via calls to | ||
:meth:`~sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase.fit`. It requires Amazon | ||
:class:`~sagemaker.amazon.record_pb2.Record` protobuf serialized data to be stored in S3. | ||
There is an utility :meth:`~sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase.record_set` that | ||
can be used to upload data to S3 and creates :class:`~sagemaker.amazon.amazon_estimator.RecordSet` to be passed | ||
to the `fit` call. | ||
|
||
To learn more about the Amazon protobuf Record class and how to prepare bulk data in this format, please | ||
consult AWS technical documentation: https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-training.html | ||
|
||
After this Estimator is fit, model data is stored in S3. The model may be deployed to an Amazon SageMaker | ||
Endpoint by invoking :meth:`~sagemaker.amazon.estimator.EstimatorBase.deploy`. As well as deploying an Endpoint, | ||
deploy returns a :class:`~sagemaker.amazon.pca.FactorizationMachinesPredictor` object that can be used | ||
for inference calls using the trained model hosted in the SageMaker Endpoint. | ||
|
||
FactorizationMachines Estimators can be configured by setting hyperparameters. The available hyperparameters for | ||
FactorizationMachines are documented below. | ||
|
||
For further information on the AWS FactorizationMachines algorithm, | ||
please consult AWS technical documentation: https://docs.aws.amazon.com/sagemaker/latest/dg/fact-machines.html | ||
|
||
Args: | ||
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and | ||
APIs that create Amazon SageMaker endpoints use this role to access | ||
training data and model artifacts. After the endpoint is created, | ||
the inference code might use the IAM role, if accessing AWS resource. | ||
train_instance_count (int): Number of Amazon EC2 instances to use for training. | ||
train_instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'. | ||
num_factors (int): Dimensionality of factorization. | ||
predictor_type (str): Type of predictor 'binary_classifier' or 'regressor'. | ||
epochs (int): Number of training epochs to run. | ||
clip_gradient (float): Optimizer parameter. Clip the gradient by projecting onto | ||
the box [-clip_gradient, +clip_gradient] | ||
eps (float): Optimizer parameter. Small value to avoid division by 0. | ||
rescale_grad (float): Optimizer parameter. If set, multiplies the gradient with rescale_grad | ||
before updating. Often choose to be 1.0/batch_size. | ||
bias_lr (float): Non-negative learning rate for the bias term. | ||
linear_lr (float): Non-negative learning rate for linear terms. | ||
factors_lr (float): Noon-negative learning rate for factorization terms. | ||
bias_wd (float): Non-negative weight decay for the bias term. | ||
linear_wd (float): Non-negative weight decay for linear terms. | ||
factors_wd (float): Non-negative weight decay for factorization terms. | ||
bias_init_method (string): Initialization method for the bias term: 'normal', 'uniform' or 'constant'. | ||
bias_init_scale (float): Non-negative range for initialization of the bias term that takes | ||
effect when bias_init_method parameter is 'uniform' | ||
bias_init_sigma (float): Non-negative standard deviation for initialization of the bias term that takes | ||
effect when bias_init_method parameter is 'normal'. | ||
bias_init_value (float): Initial value of the bias term that takes effect | ||
when bias_init_method parameter is 'constant'. | ||
linear_init_method (string): Initialization method for linear term: 'normal', 'uniform' or 'constant'. | ||
linear_init_scale (float): Non-negative range for initialization of linear terms that takes | ||
effect when linear_init_method parameter is 'uniform'. | ||
linear_init_sigma (float): Non-negative standard deviation for initialization of linear terms that takes | ||
effect when linear_init_method parameter is 'normal'. | ||
linear_init_value (float): Initial value of linear terms that takes effect | ||
when linear_init_method parameter is 'constant'. | ||
factors_init_method (string): Initialization method for factorization term: 'normal', | ||
'uniform' or 'constant'. | ||
factors_init_scale (float): Non-negative range for initialization of factorization terms that takes | ||
effect when factors_init_method parameter is 'uniform'. | ||
factors_init_sigma (float): Non-negative standard deviation for initialization of factorization terms that | ||
takes effect when factors_init_method parameter is 'normal'. | ||
factors_init_value (float): Initial value of factorization terms that takes | ||
effect when factors_init_method parameter is 'constant'. | ||
**kwargs: base class keyword argument values. | ||
""" | ||
        super(FactorizationMachines, self).__init__(role, train_instance_count, train_instance_type, **kwargs)

        self.num_factors = num_factors
        self.predictor_type = predictor_type
        self.epochs = epochs
        self.clip_gradient = clip_gradient
        self.eps = eps
        self.rescale_grad = rescale_grad
        self.bias_lr = bias_lr
        self.linear_lr = linear_lr
        self.factors_lr = factors_lr
        self.bias_wd = bias_wd
        self.linear_wd = linear_wd
        self.factors_wd = factors_wd
        self.bias_init_method = bias_init_method
        self.bias_init_scale = bias_init_scale
        self.bias_init_sigma = bias_init_sigma
        self.bias_init_value = bias_init_value
        self.linear_init_method = linear_init_method
        self.linear_init_scale = linear_init_scale
        self.linear_init_sigma = linear_init_sigma
        self.linear_init_value = linear_init_value
        self.factors_init_method = factors_init_method
        self.factors_init_scale = factors_init_scale
        self.factors_init_sigma = factors_init_sigma
        self.factors_init_value = factors_init_value

    def create_model(self):
        """Return a :class:`~sagemaker.amazon.factorization_machines.FactorizationMachinesModel` referencing
        the latest s3 model data produced by this Estimator."""
        return FactorizationMachinesModel(self.model_data, self.role, sagemaker_session=self.sagemaker_session)

class FactorizationMachinesPredictor(RealTimePredictor):
    """Performs binary-classification or regression prediction from input vectors.

    The implementation of :meth:`~sagemaker.predictor.RealTimePredictor.predict` in this
    `RealTimePredictor` requires a numpy ``ndarray`` as input. The array should contain the
    same number of columns as the feature-dimension of the data used to fit the model this
    Predictor performs inference on.

    :meth:`predict()` returns a list of :class:`~sagemaker.amazon.record_pb2.Record` objects, one
    for each row in the input ``ndarray``. The prediction is stored in the ``"score"``
    key of the ``Record.label`` field.
    Please refer to the format details described at: https://docs.aws.amazon.com/sagemaker/latest/dg/fm-in-formats.html
    """
    def __init__(self, endpoint, sagemaker_session=None):
        super(FactorizationMachinesPredictor, self).__init__(endpoint,
                                                             sagemaker_session,
                                                             serializer=numpy_to_record_serializer(),
                                                             deserializer=record_deserializer())


class FactorizationMachinesModel(Model):
    """Reference S3 model data created by a FactorizationMachines estimator. Calling
    :meth:`~sagemaker.model.Model.deploy` creates an Endpoint and returns a
    :class:`FactorizationMachinesPredictor`."""

    def __init__(self, model_data, role, sagemaker_session=None):
        sagemaker_session = sagemaker_session or Session()
        image = registry(sagemaker_session.boto_session.region_name) + "/" + FactorizationMachines.repo
        super(FactorizationMachinesModel, self).__init__(model_data,
                                                         image,
                                                         role,
                                                         predictor_cls=FactorizationMachinesPredictor,
                                                         sagemaker_session=sagemaker_session)
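For context, a minimal end-to-end usage sketch of the new wrapper - hypothetical and not part of this PR; the IAM role name, instance types, and toy data are placeholder assumptions, and it relies on the record_set, fit, and deploy behavior described in the docstrings above:

import numpy as np

from sagemaker.amazon.factorization_machines import FactorizationMachines

# Placeholder role and toy dense training data (hypothetical values).
role = 'SageMakerRole'
features = np.random.rand(100, 50).astype('float32')
labels = np.random.randint(0, 2, size=100).astype('float32')

fm = FactorizationMachines(role=role,
                           train_instance_count=1,
                           train_instance_type='ml.c4.xlarge',
                           num_factors=10,
                           predictor_type='binary_classifier')

# record_set uploads the data to S3 as protobuf Records and returns a RecordSet.
records = fm.record_set(features, labels=labels)
fm.fit(records)

# deploy creates an Endpoint and returns a FactorizationMachinesPredictor.
predictor = fm.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge')
for record in predictor.predict(features[:5]):
    print(record.label['score'])  # prediction stored under the "score" key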
@@ -33,8 +33,8 @@ class KMeans(AmazonAlgorithmEstimatorBase):
    epochs = hp('epochs', (gt(0), isint), 'An integer greater-than 0')
    center_factor = hp('extra_center_factor', (gt(0), isint), 'An integer greater-than 0')

-   def __init__(self, role, train_instance_count, train_instance_type, k, init_method=None,
-                max_iterations=None, tol=None, num_trials=None, local_init_method=None,
+   def __init__(self, role, train_instance_count, train_instance_type, k, default_mini_batch_size=5000,
+                init_method=None, max_iterations=None, tol=None, num_trials=None, local_init_method=None,
                 half_life_time_size=None, epochs=None, center_factor=None, **kwargs):

Review comment: Introducing another argument with a default value is a little dangerous. If someone is calling this function with arguments set positionally, then this will fail (see the sketch below). That said, I think we can take this risk - the chance of this problem occurring is low and we're doing a new release. I'd recommend including constructor signature changes in the changelog.

Reply: This is a required parameter and must be specified by the user. An alternative to declaring it in the constructor would be enforcing it in fit. Let me rewrite it in that alternative form.
""" | ||
A k-means clustering :class:`~sagemaker.amazon.AmazonAlgorithmEstimatorBase`. Finds k clusters of data in an | ||
|
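To make the positional-argument concern concrete, here is a hypothetical, simplified sketch (not the SDK's real signatures) of the breakage the reviewer describes, plus the fit-time enforcement the reply proposes as an alternative:

# Hypothetical, simplified signatures to illustrate the review discussion.

# Before the change: callers may pass init_method positionally after k.
def init_v1(role, k, init_method=None):
    return init_method

# After inserting default_mini_batch_size before init_method, the same
# positional call now binds 'random' to the new parameter instead.
def init_v2(role, k, default_mini_batch_size=5000, init_method=None):
    return init_method

print(init_v1('role', 10, 'random'))  # 'random'
print(init_v2('role', 10, 'random'))  # None - value bound to the wrong parameter

# The reply's alternative: keep the constructor unchanged and require the
# value when fit() is called instead.
def fit(records, mini_batch_size=None):
    if mini_batch_size is None:
        raise ValueError('mini_batch_size must be specified for KMeans')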
@@ -67,6 +67,7 @@ def __init__(self, role, train_instance_count, train_instance_type, k, init_meth
            train_instance_count (int): Number of Amazon EC2 instances to use for training.
            train_instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'.
            k (int): The number of clusters to produce.
+           default_mini_batch_size (int): Default size of mini-batch used for training.
            init_method (str): How to initialize cluster locations. One of 'random' or 'kmeans++'.
            max_iterations (int): Maximum iterations for Lloyds EM procedure in the local kmeans used in finalize stage.
            tol (int): Tolerance for change in ssd for early stopping in local kmeans.

@@ -83,7 +84,8 @@ def __init__(self, role, train_instance_count, train_instance_type, k, init_meth
                reduce the number of centers to ``k`` when finalizing
            **kwargs: base class keyword argument values.
        """
-       super(KMeans, self).__init__(role, train_instance_count, train_instance_type, **kwargs)
+       super(KMeans, self).__init__(role, train_instance_count, train_instance_type,
+                                    default_mini_batch_size, **kwargs)
        self.k = k
        self.init_method = init_method
        self.max_iterations = max_iterations
Review comment: Love this! Should have done this from the get-go.