From 2f00d869ed4f60ca2aafa6a5e9b68cff869a8828 Mon Sep 17 00:00:00 2001
From: Dimitar Ganev
Date: Mon, 12 Apr 2021 01:47:24 +0300
Subject: [PATCH 1/5] restore `.py` file extensions to the files (#3946)

---
 ...ustering_tensorflow.py_tf => k_means_clustering_tensorflow.py} | 0
 .../lstm/{lstm_prediction.py_tf => lstm_prediction.py}            | 0
 neural_network/{gan.py_tf => gan.py}                              | 0
 neural_network/{input_data.py_tf => input_data.py}                | 0
 4 files changed, 0 insertions(+), 0 deletions(-)
 rename dynamic_programming/{k_means_clustering_tensorflow.py_tf => k_means_clustering_tensorflow.py} (100%)
 rename machine_learning/lstm/{lstm_prediction.py_tf => lstm_prediction.py} (100%)
 rename neural_network/{gan.py_tf => gan.py} (100%)
 rename neural_network/{input_data.py_tf => input_data.py} (100%)

diff --git a/dynamic_programming/k_means_clustering_tensorflow.py_tf b/dynamic_programming/k_means_clustering_tensorflow.py
similarity index 100%
rename from dynamic_programming/k_means_clustering_tensorflow.py_tf
rename to dynamic_programming/k_means_clustering_tensorflow.py
diff --git a/machine_learning/lstm/lstm_prediction.py_tf b/machine_learning/lstm/lstm_prediction.py
similarity index 100%
rename from machine_learning/lstm/lstm_prediction.py_tf
rename to machine_learning/lstm/lstm_prediction.py
diff --git a/neural_network/gan.py_tf b/neural_network/gan.py
similarity index 100%
rename from neural_network/gan.py_tf
rename to neural_network/gan.py
diff --git a/neural_network/input_data.py_tf b/neural_network/input_data.py
similarity index 100%
rename from neural_network/input_data.py_tf
rename to neural_network/input_data.py

From 4041dbf11a90e52cde34c2a2d467e577af8279d6 Mon Sep 17 00:00:00 2001
From: Dhruv Manilawala
Date: Wed, 28 Apr 2021 11:46:01 +0530
Subject: [PATCH 2/5] Add back tensorflow to Python 3.9 requirements

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 349d88944656..e9bf96843708 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,5 +13,5 @@ scikit-fuzzy
 sklearn
 statsmodels
 sympy
-tensorflow; python_version < '3.9'
+tensorflow
 xgboost
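Context for PATCH 2/5: the environment marker on the removed requirements line, `tensorflow; python_version < '3.9'`, tells pip to skip tensorflow entirely on Python 3.9 and later, which is what the subject line "Add back tensorflow to Python 3.9 requirements" is undoing now that the tensorflow-based files are restored. A minimal sketch of how such a marker evaluates, using the `packaging` library that pip itself builds on; the version strings below are illustrative and not taken from the patch:

    from packaging.markers import Marker

    # The marker that PATCH 2/5 removes: true on 3.8, false on 3.9,
    # so tensorflow stopped being installed once CI moved to Python 3.9.
    marker = Marker("python_version < '3.9'")
    print(marker.evaluate({"python_version": "3.8"}))  # True  -> tensorflow installed
    print(marker.evaluate({"python_version": "3.9"}))  # False -> tensorflow skipped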
From 9ad9bf173a5aa51af02ee537752eb7e2877259a9 Mon Sep 17 00:00:00 2001
From: Dhruv Manilawala
Date: Wed, 28 Apr 2021 11:51:57 +0530
Subject: [PATCH 3/5] fix imports for gan.py

---
 neural_network/gan.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/neural_network/gan.py b/neural_network/gan.py
index deb062c48dc7..6f03f083f34f 100644
--- a/neural_network/gan.py
+++ b/neural_network/gan.py
@@ -1,8 +1,8 @@
 import matplotlib.gridspec as gridspec
 import matplotlib.pyplot as plt
 import numpy as np
+from neural_network import input_data
 from sklearn.utils import shuffle
-import input_data
 
 random_numer = 42
 

From e7a02c931d727ba217c9668a749aa5bd5d7f78a5 Mon Sep 17 00:00:00 2001
From: Christian Clauss
Date: Wed, 28 Apr 2021 08:24:56 +0200
Subject: [PATCH 4/5] tensorflow>=2.5rc2

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index e9bf96843708..63f890a8b8bb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,5 +13,5 @@ scikit-fuzzy
 sklearn
 statsmodels
 sympy
-tensorflow
+tensorflow>=2.5rc2
 xgboost
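Context for PATCH 3/5 above: the fix replaces the bare `import input_data` with the absolute `from neural_network import input_data`, so gan.py no longer relies on the neural_network/ directory itself being on `sys.path` (the default when the file is run directly as a script); instead it expects the repository root to be importable. A minimal sketch of the resolved import, assuming a hypothetical checkout location; only the module names are taken from the patch:

    import importlib
    import sys

    # Illustrative only: with the repository root on sys.path (for example via
    # PYTHONPATH, or by running `python -m neural_network.gan` from the root),
    # the absolute import used by the patch resolves as a package submodule.
    sys.path.insert(0, "/path/to/checkout")  # hypothetical repository root
    input_data = importlib.import_module("neural_network.input_data")
    print(input_data.__name__)  # neural_network.input_data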
From 7420d11ff354abf2229bc485c5e8e473de3fec4a Mon Sep 17 00:00:00 2001
From: Dimitar Ganev
Date: Thu, 6 May 2021 10:58:42 +0300
Subject: [PATCH 5/5] fix pre-commit errors

---
 .../k_means_clustering_tensorflow.py |  3 +-
 neural_network/gan.py                |  3 +-
 neural_network/input_data.py         | 96 +++++++++----------
 3 files changed, 48 insertions(+), 54 deletions(-)

diff --git a/dynamic_programming/k_means_clustering_tensorflow.py b/dynamic_programming/k_means_clustering_tensorflow.py
index 4fbcedeaa0dc..b19ffb64c5e9 100644
--- a/dynamic_programming/k_means_clustering_tensorflow.py
+++ b/dynamic_programming/k_means_clustering_tensorflow.py
@@ -1,5 +1,6 @@
-import tensorflow as tf
 from random import shuffle
+
+import tensorflow as tf
 from numpy import array
 
 
diff --git a/neural_network/gan.py b/neural_network/gan.py
index 6f03f083f34f..6eeb50975ad7 100644
--- a/neural_network/gan.py
+++ b/neural_network/gan.py
@@ -1,9 +1,10 @@
 import matplotlib.gridspec as gridspec
 import matplotlib.pyplot as plt
 import numpy as np
-from neural_network import input_data
 from sklearn.utils import shuffle
 
+from neural_network import input_data
+
 random_numer = 42
 
 np.random.seed(random_numer)
diff --git a/neural_network/input_data.py b/neural_network/input_data.py
index 0e22ac0bcda5..4937508d389d 100644
--- a/neural_network/input_data.py
+++ b/neural_network/input_data.py
@@ -23,11 +23,9 @@
 import os
 
 import numpy
-from six.moves import urllib
 from six.moves import xrange  # pylint: disable=redefined-builtin
-
-from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import random_seed
+from six.moves import urllib
+from tensorflow.python.framework import dtypes, random_seed
 from tensorflow.python.platform import gfile
 from tensorflow.python.util.deprecation import deprecated
 
@@ -46,16 +44,16 @@ def _read32(bytestream):
 def _extract_images(f):
     """Extract the images into a 4D uint8 numpy array [index, y, x, depth].
 
-  Args:
-    f: A file object that can be passed into a gzip reader.
+    Args:
+      f: A file object that can be passed into a gzip reader.
 
-  Returns:
-    data: A 4D uint8 numpy array [index, y, x, depth].
+    Returns:
+      data: A 4D uint8 numpy array [index, y, x, depth].
 
-  Raises:
-    ValueError: If the bytestream does not start with 2051.
+    Raises:
+      ValueError: If the bytestream does not start with 2051.
 
-  """
+    """
     print("Extracting", f.name)
     with gzip.GzipFile(fileobj=f) as bytestream:
         magic = _read32(bytestream)
@@ -86,17 +84,17 @@ def _dense_to_one_hot(labels_dense, num_classes):
 def _extract_labels(f, one_hot=False, num_classes=10):
     """Extract the labels into a 1D uint8 numpy array [index].
 
-  Args:
-    f: A file object that can be passed into a gzip reader.
-    one_hot: Does one hot encoding for the result.
-    num_classes: Number of classes for the one hot encoding.
+    Args:
+      f: A file object that can be passed into a gzip reader.
+      one_hot: Does one hot encoding for the result.
+      num_classes: Number of classes for the one hot encoding.
 
-  Returns:
-    labels: a 1D uint8 numpy array.
+    Returns:
+      labels: a 1D uint8 numpy array.
 
-  Raises:
-    ValueError: If the bystream doesn't start with 2049.
-  """
+    Raises:
+      ValueError: If the bystream doesn't start with 2049.
+    """
     print("Extracting", f.name)
     with gzip.GzipFile(fileobj=f) as bytestream:
         magic = _read32(bytestream)
@@ -115,8 +113,8 @@ def _extract_labels(f, one_hot=False, num_classes=10):
 class _DataSet:
     """Container class for a _DataSet (deprecated).
 
-  THIS CLASS IS DEPRECATED.
-  """
+    THIS CLASS IS DEPRECATED.
+    """
 
     @deprecated(
         None,
@@ -135,21 +133,21 @@ def __init__(
     ):
         """Construct a _DataSet.
 
-    one_hot arg is used only if fake_data is true. `dtype` can be either
-    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
-    `[0, 1]`. Seed arg provides for convenient deterministic testing.
-
-    Args:
-      images: The images
-      labels: The labels
-      fake_data: Ignore inages and labels, use fake data.
-      one_hot: Bool, return the labels as one hot vectors (if True) or ints (if
-        False).
-      dtype: Output image dtype. One of [uint8, float32]. `uint8` output has
-        range [0,255]. float32 output has range [0,1].
-      reshape: Bool. If True returned images are returned flattened to vectors.
-      seed: The random seed to use.
-    """
+        one_hot arg is used only if fake_data is true. `dtype` can be either
+        `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
+        `[0, 1]`. Seed arg provides for convenient deterministic testing.
+
+        Args:
+          images: The images
+          labels: The labels
+          fake_data: Ignore inages and labels, use fake data.
+          one_hot: Bool, return the labels as one hot vectors (if True) or ints (if
+            False).
+          dtype: Output image dtype. One of [uint8, float32]. `uint8` output has
+            range [0,255]. float32 output has range [0,1].
+          reshape: Bool. If True returned images are returned flattened to vectors.
+          seed: The random seed to use.
+        """
         seed1, seed2 = random_seed.get_seed(seed)
         # If op level seed is not set, use whatever graph level seed is returned
         numpy.random.seed(seed1 if seed is None else seed2)
@@ -250,14 +248,14 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True):
 def _maybe_download(filename, work_directory, source_url):
     """Download the data from source url, unless it's already here.
 
-  Args:
-    filename: string, name of the file in the directory.
-    work_directory: string, path to working directory.
-    source_url: url to download from if file doesn't exist.
+    Args:
+      filename: string, name of the file in the directory.
+      work_directory: string, path to working directory.
+      source_url: url to download from if file doesn't exist.
 
-  Returns:
-    Path to resulting file.
-  """
+    Returns:
+      Path to resulting file.
+    """
     if not gfile.Exists(work_directory):
         gfile.MakeDirs(work_directory)
     filepath = os.path.join(work_directory, filename)
@@ -293,10 +291,8 @@ def fake():
         validation = fake()
         test = fake()
         return _Datasets(train=train, validation=validation, test=test)
-
     if not source_url:  # empty string check
         source_url = DEFAULT_SOURCE_URL
-
     train_images_file = "train-images-idx3-ubyte.gz"
     train_labels_file = "train-labels-idx1-ubyte.gz"
     test_images_file = "t10k-images-idx3-ubyte.gz"
@@ -307,30 +303,26 @@ def fake():
     )
     with gfile.Open(local_file, "rb") as f:
         train_images = _extract_images(f)
-
     local_file = _maybe_download(
         train_labels_file, train_dir, source_url + train_labels_file
     )
     with gfile.Open(local_file, "rb") as f:
         train_labels = _extract_labels(f, one_hot=one_hot)
-
     local_file = _maybe_download(
         test_images_file, train_dir, source_url + test_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
-
     local_file = _maybe_download(
         test_labels_file, train_dir, source_url + test_labels_file
     )
     with gfile.Open(local_file, "rb") as f:
         test_labels = _extract_labels(f, one_hot=one_hot)
-
     if not 0 <= validation_size <= len(train_images):
         raise ValueError(
-            f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
+            f"Validation size should be between 0 "
+            f"and {len(train_images)}. Received: {validation_size}."
         )
-
     validation_images = train_images[:validation_size]
     validation_labels = train_labels[:validation_size]
     train_images = train_images[validation_size:]
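One detail in the last hunk of PATCH 5/5 is easy to miss: the long ValueError message is split into two adjacent f-string literals purely to satisfy the line-length check, and adjacent string literals are concatenated at compile time, so the raised message is unchanged. A minimal sketch of that behaviour; the sample values below are made up for illustration and are not taken from the dataset code:

    train_images = [0] * 55000   # hypothetical stand-in for the loaded images
    validation_size = 60001      # deliberately out of range for the example

    if not 0 <= validation_size <= len(train_images):
        raise ValueError(
            f"Validation size should be between 0 "
            f"and {len(train_images)}. Received: {validation_size}."
        )
    # ValueError: Validation size should be between 0 and 55000. Received: 60001.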