
Integrate black into development process #873


Merged: 7 commits, Jun 24, 2019

3 changes: 3 additions & 0 deletions buildspec-release.yml
@@ -9,6 +9,9 @@ phases:
# run linters
- tox -e flake8,pylint

# run format verification
- tox -e black-check

# run package and docbuild checks
- tox -e twine
- tox -e sphinx
3 changes: 3 additions & 0 deletions buildspec.yml
@@ -14,6 +14,9 @@ phases:
- tox -e twine
- tox -e sphinx

# run format verification
- tox -e black-check

# run unit tests
- AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= AWS_SESSION_TOKEN=
AWS_CONTAINER_CREDENTIALS_RELATIVE_URI= AWS_DEFAULT_REGION=
56 changes: 35 additions & 21 deletions doc/conf.py
@@ -28,51 +28,65 @@ def __getattr__(cls, name):
return MagicMock()


MOCK_MODULES = ['tensorflow', 'tensorflow.core', 'tensorflow.core.framework', 'tensorflow.python',
'tensorflow.python.framework', 'tensorflow_serving', 'tensorflow_serving.apis',
'numpy', 'scipy', 'scipy.sparse']
MOCK_MODULES = [
"tensorflow",
"tensorflow.core",
"tensorflow.core.framework",
"tensorflow.python",
"tensorflow.python.framework",
"tensorflow_serving",
"tensorflow_serving.apis",
"numpy",
"scipy",
"scipy.sparse",
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)

project = u'sagemaker'
project = u"sagemaker"
version = pkg_resources.require(project)[0].version

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.autosummary',
'sphinx.ext.napoleon']
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]

source_suffix = '.rst' # The suffix of source filenames.
master_doc = 'index' # The master toctree document.
source_suffix = ".rst" # The suffix of source filenames.
master_doc = "index" # The master toctree document.

copyright = u'%s, Amazon' % datetime.now().year
copyright = u"%s, Amazon" % datetime.now().year

# The full version, including alpha/beta/rc tags.
release = version

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
exclude_trees = ["_build"]

pygments_style = 'default'
pygments_style = "default"

autoclass_content = "both"
autodoc_default_flags = ['show-inheritance', 'members', 'undoc-members']
autodoc_member_order = 'bysource'
autodoc_default_flags = ["show-inheritance", "members", "undoc-members"]
autodoc_member_order = "bysource"

if 'READTHEDOCS' in os.environ:
html_theme = 'default'
if "READTHEDOCS" in os.environ:
html_theme = "default"
else:
html_theme = 'haiku'
html_theme = "haiku"
html_static_path = []
htmlhelp_basename = '%sdoc' % project
htmlhelp_basename = "%sdoc" % project

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
intersphinx_mapping = {"http://docs.python.org/": None}

# autosummary
autosummary_generate = True
10 changes: 5 additions & 5 deletions examples/cli/host/script.py
@@ -12,12 +12,12 @@ def model_fn(model_dir):
:param: model_dir The directory where model files are stored.
:return: a model (in this case a Gluon network)
"""
symbol = mx.sym.load('%s/model.json' % model_dir)
outputs = mx.symbol.softmax(data=symbol, name='softmax_label')
inputs = mx.sym.var('data')
param_dict = gluon.ParameterDict('model_')
symbol = mx.sym.load("%s/model.json" % model_dir)
outputs = mx.symbol.softmax(data=symbol, name="softmax_label")
inputs = mx.sym.var("data")
param_dict = gluon.ParameterDict("model_")
net = gluon.SymbolBlock(outputs, inputs, param_dict)
net.load_params('%s/model.params' % model_dir, ctx=mx.cpu())
net.load_params("%s/model.params" % model_dir, ctx=mx.cpu())
return net


4 changes: 2 additions & 2 deletions examples/cli/train/download_training_data.py
@@ -2,8 +2,8 @@


def download_training_data():
gluon.data.vision.MNIST('./data/training', train=True)
gluon.data.vision.MNIST('./data/training', train=False)
gluon.data.vision.MNIST("./data/training", train=True)
gluon.data.vision.MNIST("./data/training", train=False)


if __name__ == "__main__":
46 changes: 27 additions & 19 deletions examples/cli/train/script.py
@@ -15,13 +15,13 @@ def train(channel_input_dirs, hyperparameters, **kwargs):
ctx = mx.cpu()

# retrieve the hyperparameters we set in notebook (with some defaults)
batch_size = hyperparameters.get('batch_size', 100)
epochs = hyperparameters.get('epochs', 10)
learning_rate = hyperparameters.get('learning_rate', 0.1)
momentum = hyperparameters.get('momentum', 0.9)
log_interval = hyperparameters.get('log_interval', 100)
batch_size = hyperparameters.get("batch_size", 100)
epochs = hyperparameters.get("epochs", 10)
learning_rate = hyperparameters.get("learning_rate", 0.1)
momentum = hyperparameters.get("momentum", 0.9)
log_interval = hyperparameters.get("log_interval", 100)

training_data = channel_input_dirs['training']
training_data = channel_input_dirs["training"]

# load training and validation data
# we use the gluon.data.vision.MNIST class because of its built in mnist pre-processing logic,
@@ -35,8 +35,9 @@ def train(channel_input_dirs, hyperparameters, **kwargs):
# Collect all parameters from net and its children, then initialize them.
net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
# Trainer is for updating parameters with gradient.
trainer = gluon.Trainer(net.collect_params(), 'sgd',
{'learning_rate': learning_rate, 'momentum': momentum})
trainer = gluon.Trainer(
net.collect_params(), "sgd", {"learning_rate": learning_rate, "momentum": momentum}
)
metric = mx.metric.Accuracy()
loss = gluon.loss.SoftmaxCrossEntropyLoss()

@@ -61,32 +62,34 @@ def train(channel_input_dirs, hyperparameters, **kwargs):

if i % log_interval == 0 and i > 0:
name, acc = metric.get()
logger.info('[Epoch %d Batch %d] Training: %s=%f, %f samples/s' %
(epoch, i, name, acc, batch_size / (time.time() - btic)))
logger.info(
"[Epoch %d Batch %d] Training: %s=%f, %f samples/s"
% (epoch, i, name, acc, batch_size / (time.time() - btic))
)

btic = time.time()

name, acc = metric.get()
logger.info('[Epoch %d] Training: %s=%f' % (epoch, name, acc))
logger.info("[Epoch %d] Training: %s=%f" % (epoch, name, acc))

name, val_acc = test(ctx, net, val_data)
logger.info('[Epoch %d] Validation: %s=%f' % (epoch, name, val_acc))
logger.info("[Epoch %d] Validation: %s=%f" % (epoch, name, val_acc))

return net


def save(net, model_dir):
# save the model
y = net(mx.sym.var('data'))
y.save('%s/model.json' % model_dir)
net.collect_params().save('%s/model.params' % model_dir)
y = net(mx.sym.var("data"))
y.save("%s/model.json" % model_dir)
net.collect_params().save("%s/model.params" % model_dir)


def define_network():
net = nn.Sequential()
with net.name_scope():
net.add(nn.Dense(128, activation='relu'))
net.add(nn.Dense(64, activation='relu'))
net.add(nn.Dense(128, activation="relu"))
net.add(nn.Dense(64, activation="relu"))
net.add(nn.Dense(10))
return net

@@ -99,13 +102,18 @@ def input_transformer(data, label):
def get_train_data(data_dir, batch_size):
return gluon.data.DataLoader(
gluon.data.vision.MNIST(data_dir, train=True, transform=input_transformer),
batch_size=batch_size, shuffle=True, last_batch='discard')
batch_size=batch_size,
shuffle=True,
last_batch="discard",
)


def get_val_data(data_dir, batch_size):
return gluon.data.DataLoader(
gluon.data.vision.MNIST(data_dir, train=False, transform=input_transformer),
batch_size=batch_size, shuffle=False)
batch_size=batch_size,
shuffle=False,
)


def test(ctx, net, val_data):
88 changes: 52 additions & 36 deletions setup.py
@@ -24,46 +24,62 @@ def read(fname):


def read_version():
return read('VERSION').strip()
return read("VERSION").strip()


# Declare minimal set for installation
required_packages = ['boto3>=1.9.169', 'numpy>=1.9.0', 'protobuf>=3.1', 'scipy>=0.19.0',
'urllib3>=1.21, <1.25', 'protobuf3-to-dict>=0.1.5', 'docker-compose>=1.23.0',
'requests>=2.20.0, <2.21']
required_packages = [
"boto3>=1.9.169",
"numpy>=1.9.0",
"protobuf>=3.1",
"scipy>=0.19.0",
"urllib3>=1.21, <1.25",
"protobuf3-to-dict>=0.1.5",
"docker-compose>=1.23.0",
"requests>=2.20.0, <2.21",
]

# enum is introduced in Python 3.4. Installing enum back port
if sys.version_info < (3, 4):
required_packages.append('enum34>=1.1.6')
required_packages.append("enum34>=1.1.6")

setup(name="sagemaker",
version=read_version(),
description="Open source library for training and deploying models on Amazon SageMaker.",
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[os.path.splitext(os.path.basename(path))[0] for path in glob('src/*.py')],
long_description=read('README.rst'),
author="Amazon Web Services",
url='https://github.com/aws/sagemaker-python-sdk/',
license="Apache License 2.0",
keywords="ML Amazon AWS AI Tensorflow MXNet",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
],

install_requires=required_packages,

extras_require={
'test': ['tox', 'flake8', 'pytest==4.4.1', 'pytest-cov', 'pytest-rerunfailures',
'pytest-xdist', 'mock', 'tensorflow>=1.3.0', 'contextlib2',
'awslogs', 'pandas']},

entry_points={
'console_scripts': ['sagemaker=sagemaker.cli.main:main'],
})
setup(
name="sagemaker",
version=read_version(),
description="Open source library for training and deploying models on Amazon SageMaker.",
packages=find_packages("src"),
package_dir={"": "src"},
py_modules=[os.path.splitext(os.path.basename(path))[0] for path in glob("src/*.py")],
long_description=read("README.rst"),
author="Amazon Web Services",
url="https://github.com/aws/sagemaker-python-sdk/",
license="Apache License 2.0",
keywords="ML Amazon AWS AI Tensorflow MXNet",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
],
install_requires=required_packages,
extras_require={
"test": [
"tox",
"flake8",
"pytest==4.4.1",
"pytest-cov",
"pytest-rerunfailures",
"pytest-xdist",
"mock",
"tensorflow>=1.3.0",
"contextlib2",
"awslogs",
"pandas",
"black==19.3b0 ; python_version >= '3.6'",
]
},
entry_points={"console_scripts": ["sagemaker=sagemaker.cli.main:main"]},
)
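
With the buildspec steps above running tox -e black-check in CI and the test extra now pinning black==19.3b0 for Python 3.6+, the same formatting check can be reproduced locally. The following is a minimal sketch only: the script name, the target paths, and the subprocess-based invocation are illustrative assumptions and not part of this pull request; the actual gate remains the black-check tox environment.

# check_format.py -- hypothetical local helper, not part of this pull request.
# Mirrors the CI "tox -e black-check" step by invoking black in check mode.
# Assumes black (pinned to 19.3b0 in the "test" extra for Python 3.6+) is installed.
import subprocess
import sys


def black_check(paths=("src/sagemaker", "tests")):
    """Return True if the given paths already conform to black's formatting."""
    # "black --check" makes no changes; it exits non-zero if any file would be reformatted.
    result = subprocess.run(["black", "--check"] + list(paths))
    return result.returncode == 0


if __name__ == "__main__":
    sys.exit(0 if black_check() else 1)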
26 changes: 20 additions & 6 deletions src/sagemaker/__init__.py
@@ -18,15 +18,29 @@
from sagemaker.amazon.kmeans import KMeans, KMeansModel, KMeansPredictor # noqa: F401
from sagemaker.amazon.pca import PCA, PCAModel, PCAPredictor # noqa: F401
from sagemaker.amazon.lda import LDA, LDAModel, LDAPredictor # noqa: F401
from sagemaker.amazon.linear_learner import LinearLearner, LinearLearnerModel, LinearLearnerPredictor # noqa: F401
from sagemaker.amazon.factorization_machines import FactorizationMachines, FactorizationMachinesModel # noqa: F401
from sagemaker.amazon.linear_learner import ( # noqa: F401
LinearLearner,
LinearLearnerModel,
LinearLearnerPredictor,
)
from sagemaker.amazon.factorization_machines import ( # noqa: F401
FactorizationMachines,
FactorizationMachinesModel,
)
from sagemaker.amazon.factorization_machines import FactorizationMachinesPredictor # noqa: F401
from sagemaker.amazon.ntm import NTM, NTMModel, NTMPredictor # noqa: F401
from sagemaker.amazon.randomcutforest import (RandomCutForest, RandomCutForestModel, # noqa: F401
RandomCutForestPredictor)
from sagemaker.amazon.randomcutforest import ( # noqa: F401
RandomCutForest,
RandomCutForestModel,
RandomCutForestPredictor,
)
from sagemaker.amazon.knn import KNN, KNNModel, KNNPredictor # noqa: F401
from sagemaker.amazon.object2vec import Object2Vec, Object2VecModel # noqa: F401
from sagemaker.amazon.ipinsights import IPInsights, IPInsightsModel, IPInsightsPredictor # noqa: F401
from sagemaker.amazon.ipinsights import ( # noqa: F401
IPInsights,
IPInsightsModel,
IPInsightsPredictor,
)

from sagemaker.algorithm import AlgorithmEstimator # noqa: F401
from sagemaker.analytics import TrainingJobAnalytics, HyperparameterTuningJobAnalytics # noqa: F401
@@ -41,4 +55,4 @@
from sagemaker.session import s3_input # noqa: F401
from sagemaker.session import get_execution_role # noqa: F401

__version__ = pkg_resources.require('sagemaker')[0].version
__version__ = pkg_resources.require("sagemaker")[0].version