Commit f2246ce

MaximSmolskiy and pre-commit-ci[bot] authored Mar 28, 2024

Enable ruff ICN001 rule (TheAlgorithms#11329)

* Enable ruff ICN001 rule

* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent efb7463 commit f2246ce
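For context: ruff's ICN001 (unconventional-import-alias) rule flags imports of well-known libraries that are not bound to their conventional alias, which is what this commit fixes across the repository. A minimal sketch of the convention being enforced (the plotting call below is illustrative and not taken from the diff):

    # ICN001 expects the community-standard aliases
    import matplotlib.pyplot as plt
    import numpy as np

    xs = np.linspace(0.0, 2.0 * np.pi, 100)  # sample points
    plt.plot(xs, np.sin(xs))                 # conventional aliases in use
    plt.show()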

8 files changed: +121 -128 lines changed
 

ciphers/hill_cipher.py

Lines changed: 18 additions & 20 deletions

@@ -38,7 +38,7 @@

import string

-import numpy
+import numpy as np

from maths.greatest_common_divisor import greatest_common_divisor

@@ -49,11 +49,11 @@ class HillCipher:
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
-    modulus = numpy.vectorize(lambda x: x % 36)
+    modulus = np.vectorize(lambda x: x % 36)

-    to_int = numpy.vectorize(round)
+    to_int = np.vectorize(round)

-    def __init__(self, encrypt_key: numpy.ndarray) -> None:
+    def __init__(self, encrypt_key: np.ndarray) -> None:
        """
        encrypt_key is an NxN numpy array
        """
@@ -63,7 +63,7 @@ def __init__(self, encrypt_key: numpy.ndarray) -> None:

    def replace_letters(self, letter: str) -> int:
        """
-        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+        >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]]))
        >>> hill_cipher.replace_letters('T')
        19
        >>> hill_cipher.replace_letters('0')
@@ -73,7 +73,7 @@ def replace_letters(self, letter: str) -> int:

    def replace_digits(self, num: int) -> str:
        """
-        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+        >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]]))
        >>> hill_cipher.replace_digits(19)
        'T'
        >>> hill_cipher.replace_digits(26)
@@ -83,10 +83,10 @@ def replace_digits(self, num: int) -> str:

    def check_determinant(self) -> None:
        """
-        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+        >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]]))
        >>> hill_cipher.check_determinant()
        """
-        det = round(numpy.linalg.det(self.encrypt_key))
+        det = round(np.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
@@ -101,7 +101,7 @@ def check_determinant(self) -> None:

    def process_text(self, text: str) -> str:
        """
-        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+        >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]]))
        >>> hill_cipher.process_text('Testing Hill Cipher')
        'TESTINGHILLCIPHERR'
        >>> hill_cipher.process_text('hello')
@@ -117,7 +117,7 @@ def process_text(self, text: str) -> str:

    def encrypt(self, text: str) -> str:
        """
-        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+        >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]]))
        >>> hill_cipher.encrypt('testing hill cipher')
        'WHXYJOLM9C6XT085LL'
        >>> hill_cipher.encrypt('hello')
@@ -129,7 +129,7 @@ def encrypt(self, text: str) -> str:
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
-            batch_vec = numpy.array([vec]).T
+            batch_vec = np.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
@@ -140,14 +140,14 @@ def encrypt(self, text: str) -> str:

        return encrypted

-    def make_decrypt_key(self) -> numpy.ndarray:
+    def make_decrypt_key(self) -> np.ndarray:
        """
-        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+        >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]]))
        >>> hill_cipher.make_decrypt_key()
        array([[ 6, 25],
               [ 5, 26]])
        """
-        det = round(numpy.linalg.det(self.encrypt_key))
+        det = round(np.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
@@ -158,16 +158,14 @@ def make_decrypt_key(self) -> numpy.ndarray:
                break

        inv_key = (
-            det_inv
-            * numpy.linalg.det(self.encrypt_key)
-            * numpy.linalg.inv(self.encrypt_key)
+            det_inv * np.linalg.det(self.encrypt_key) * np.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """
-        >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
+        >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]]))
        >>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL')
        'TESTINGHILLCIPHERR'
        >>> hill_cipher.decrypt('85FF00')
@@ -180,7 +178,7 @@ def decrypt(self, text: str) -> str:
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
-            batch_vec = numpy.array([vec]).T
+            batch_vec = np.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
@@ -199,7 +197,7 @@ def main() -> None:
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

-    hc = HillCipher(numpy.array(hill_matrix))
+    hc = HillCipher(np.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
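Taken together, the doctests above double as a usage reference. A minimal sketch under the new import style (assuming the repository root is on the import path so that ciphers.hill_cipher and its maths dependency resolve):

    import numpy as np

    from ciphers.hill_cipher import HillCipher

    # 2x2 key matrix from the doctests; it must be invertible modulo 36.
    cipher = HillCipher(np.array([[2, 5], [1, 6]]))
    encrypted = cipher.encrypt("testing hill cipher")  # 'WHXYJOLM9C6XT085LL'
    print(cipher.decrypt(encrypted))                   # 'TESTINGHILLCIPHERR'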

fractals/julia_sets.py

Lines changed: 26 additions & 28 deletions

@@ -25,8 +25,8 @@
from collections.abc import Callable
from typing import Any

-import numpy
-from matplotlib import pyplot
+import matplotlib.pyplot as plt
+import numpy as np

c_cauliflower = 0.25 + 0.0j
c_polynomial_1 = -0.4 + 0.6j
@@ -37,22 +37,20 @@
nb_pixels = 666


-def eval_exponential(c_parameter: complex, z_values: numpy.ndarray) -> numpy.ndarray:
+def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
    """
    Evaluate $e^z + c$.
    >>> eval_exponential(0, 0)
    1.0
-    >>> abs(eval_exponential(1, numpy.pi*1.j)) < 1e-15
+    >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15
    True
    >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15
    True
    """
-    return numpy.exp(z_values) + c_parameter
+    return np.exp(z_values) + c_parameter


-def eval_quadratic_polynomial(
-    c_parameter: complex, z_values: numpy.ndarray
-) -> numpy.ndarray:
+def eval_quadratic_polynomial(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
    """
    >>> eval_quadratic_polynomial(0, 2)
    4
@@ -66,7 +64,7 @@ def eval_quadratic_polynomial(
    return z_values * z_values + c_parameter


-def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray:
+def prepare_grid(window_size: float, nb_pixels: int) -> np.ndarray:
    """
    Create a grid of complex values of size nb_pixels*nb_pixels with real and
    imaginary parts ranging from -window_size to window_size (inclusive).
@@ -77,74 +75,74 @@ def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray:
           [ 0.-1.j, 0.+0.j, 0.+1.j],
           [ 1.-1.j, 1.+0.j, 1.+1.j]])
    """
-    x = numpy.linspace(-window_size, window_size, nb_pixels)
+    x = np.linspace(-window_size, window_size, nb_pixels)
    x = x.reshape((nb_pixels, 1))
-    y = numpy.linspace(-window_size, window_size, nb_pixels)
+    y = np.linspace(-window_size, window_size, nb_pixels)
    y = y.reshape((1, nb_pixels))
    return x + 1.0j * y


def iterate_function(
-    eval_function: Callable[[Any, numpy.ndarray], numpy.ndarray],
+    eval_function: Callable[[Any, np.ndarray], np.ndarray],
    function_params: Any,
    nb_iterations: int,
-    z_0: numpy.ndarray,
+    z_0: np.ndarray,
    infinity: float | None = None,
-) -> numpy.ndarray:
+) -> np.ndarray:
    """
    Iterate the function "eval_function" exactly nb_iterations times.
    The first argument of the function is a parameter which is contained in
    function_params. The variable z_0 is an array that contains the initial
    values to iterate from.
    This function returns the final iterates.

-    >>> iterate_function(eval_quadratic_polynomial, 0, 3, numpy.array([0,1,2])).shape
+    >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape
    (3,)
-    >>> numpy.round(iterate_function(eval_quadratic_polynomial,
+    >>> np.round(iterate_function(eval_quadratic_polynomial,
    ... 0,
    ... 3,
-    ... numpy.array([0,1,2]))[0])
+    ... np.array([0,1,2]))[0])
    0j
-    >>> numpy.round(iterate_function(eval_quadratic_polynomial,
+    >>> np.round(iterate_function(eval_quadratic_polynomial,
    ... 0,
    ... 3,
-    ... numpy.array([0,1,2]))[1])
+    ... np.array([0,1,2]))[1])
    (1+0j)
-    >>> numpy.round(iterate_function(eval_quadratic_polynomial,
+    >>> np.round(iterate_function(eval_quadratic_polynomial,
    ... 0,
    ... 3,
-    ... numpy.array([0,1,2]))[2])
+    ... np.array([0,1,2]))[2])
    (256+0j)
    """

    z_n = z_0.astype("complex64")
    for _ in range(nb_iterations):
        z_n = eval_function(function_params, z_n)
        if infinity is not None:
-            numpy.nan_to_num(z_n, copy=False, nan=infinity)
-            z_n[abs(z_n) == numpy.inf] = infinity
+            np.nan_to_num(z_n, copy=False, nan=infinity)
+            z_n[abs(z_n) == np.inf] = infinity
    return z_n


def show_results(
    function_label: str,
    function_params: Any,
    escape_radius: float,
-    z_final: numpy.ndarray,
+    z_final: np.ndarray,
) -> None:
    """
    Plots of whether the absolute value of z_final is greater than
    the value of escape_radius. Adds the function_label and function_params to
    the title.

-    >>> show_results('80', 0, 1, numpy.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]]))
+    >>> show_results('80', 0, 1, np.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]]))
    """

    abs_z_final = (abs(z_final)).transpose()
    abs_z_final[:, :] = abs_z_final[::-1, :]
-    pyplot.matshow(abs_z_final < escape_radius)
-    pyplot.title(f"Julia set of ${function_label}$, $c={function_params}$")
-    pyplot.show()
+    plt.matshow(abs_z_final < escape_radius)
+    plt.title(f"Julia set of ${function_label}$, $c={function_params}$")
+    plt.show()


def ignore_overflow_warnings() -> None:
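As the doctests above suggest, iterate_function simply applies the chosen map repeatedly to an array of starting points. A minimal standalone sketch of the quadratic case under the new alias (the helper below re-implements the idea for illustration and is not the module's own code):

    import numpy as np

    def iterate_quadratic(c: complex, z0: np.ndarray, n: int) -> np.ndarray:
        """Apply z -> z*z + c elementwise, exactly n times."""
        z = z0.astype("complex64")
        for _ in range(n):
            z = z * z + c
        return z

    # Matches the doctest values: 0j, (1+0j), (256+0j) after three iterations with c = 0.
    print(iterate_quadratic(0, np.array([0, 1, 2]), 3))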

fractals/koch_snowflake.py

Lines changed: 17 additions & 17 deletions

@@ -22,25 +22,25 @@

from __future__ import annotations

-import matplotlib.pyplot as plt  # type: ignore
-import numpy
+import matplotlib.pyplot as plt
+import numpy as np

# initial triangle of Koch snowflake
-VECTOR_1 = numpy.array([0, 0])
-VECTOR_2 = numpy.array([0.5, 0.8660254])
-VECTOR_3 = numpy.array([1, 0])
+VECTOR_1 = np.array([0, 0])
+VECTOR_2 = np.array([0.5, 0.8660254])
+VECTOR_3 = np.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]

# uncomment for simple Koch curve instead of Koch snowflake
# INITIAL_VECTORS = [VECTOR_1, VECTOR_3]


-def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
+def iterate(initial_vectors: list[np.ndarray], steps: int) -> list[np.ndarray]:
    """
    Go through the number of iterations determined by the argument "steps".
    Be careful with high values (above 5) since the time to calculate increases
    exponentially.
-    >>> iterate([numpy.array([0, 0]), numpy.array([1, 0])], 1)
+    >>> iterate([np.array([0, 0]), np.array([1, 0])], 1)
    [array([0, 0]), array([0.33333333, 0.        ]), array([0.5       , \
0.28867513]), array([0.66666667, 0.        ]), array([1, 0])]
    """
@@ -50,13 +50,13 @@ def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndar
    return vectors


-def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
+def iteration_step(vectors: list[np.ndarray]) -> list[np.ndarray]:
    """
    Loops through each pair of adjacent vectors. Each line between two adjacent
    vectors is divided into 4 segments by adding 3 additional vectors in-between
    the original two vectors. The vector in the middle is constructed through a
    60 degree rotation so it is bent outwards.
-    >>> iteration_step([numpy.array([0, 0]), numpy.array([1, 0])])
+    >>> iteration_step([np.array([0, 0]), np.array([1, 0])])
    [array([0, 0]), array([0.33333333, 0.        ]), array([0.5       , \
0.28867513]), array([0.66666667, 0.        ]), array([1, 0])]
    """
@@ -74,22 +74,22 @@ def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    return new_vectors


-def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
+def rotate(vector: np.ndarray, angle_in_degrees: float) -> np.ndarray:
    """
    Standard rotation of a 2D vector with a rotation matrix
    (see https://en.wikipedia.org/wiki/Rotation_matrix )
-    >>> rotate(numpy.array([1, 0]), 60)
+    >>> rotate(np.array([1, 0]), 60)
    array([0.5      , 0.8660254])
-    >>> rotate(numpy.array([1, 0]), 90)
+    >>> rotate(np.array([1, 0]), 90)
    array([6.123234e-17, 1.000000e+00])
    """
-    theta = numpy.radians(angle_in_degrees)
-    c, s = numpy.cos(theta), numpy.sin(theta)
-    rotation_matrix = numpy.array(((c, -s), (s, c)))
-    return numpy.dot(rotation_matrix, vector)
+    theta = np.radians(angle_in_degrees)
+    c, s = np.cos(theta), np.sin(theta)
+    rotation_matrix = np.array(((c, -s), (s, c)))
+    return np.dot(rotation_matrix, vector)


-def plot(vectors: list[numpy.ndarray]) -> None:
+def plot(vectors: list[np.ndarray]) -> None:
    """
    Utility function to plot the vectors using matplotlib.pyplot
    No doctest was implemented since this function does not have a return value
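The rotate doctests carry over unchanged; the function is a plain 2D rotation matrix applied with numpy. A minimal standalone sketch of the same computation (re-implemented here for illustration):

    import numpy as np

    def rotate_2d(vector: np.ndarray, angle_in_degrees: float) -> np.ndarray:
        """Rotate a 2D vector counter-clockwise by the given angle."""
        theta = np.radians(angle_in_degrees)
        c, s = np.cos(theta), np.sin(theta)
        return np.array(((c, -s), (s, c))) @ vector

    print(rotate_2d(np.array([1.0, 0.0]), 60))  # -> approximately [0.5, 0.8660254]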

graphics/bezier_curve.py

Lines changed: 1 addition & 1 deletion

@@ -78,7 +78,7 @@ def plot_curve(self, step_size: float = 0.01):
            step_size: defines the step(s) at which to evaluate the Bezier curve.
            The smaller the step size, the finer the curve produced.
        """
-        from matplotlib import pyplot as plt  # type: ignore
+        from matplotlib import pyplot as plt

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

machine_learning/gradient_descent.py

Lines changed: 2 additions & 2 deletions

@@ -3,7 +3,7 @@
function.
"""

-import numpy
+import numpy as np

# List of input, output pairs
train_data = (
@@ -116,7 +116,7 @@ def run_gradient_descent():
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
-        if numpy.allclose(
+        if np.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
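For readers unfamiliar with the convergence check shown above, np.allclose compares two arrays elementwise within absolute and relative tolerances. A minimal sketch with made-up values (the parameter vectors and tolerance below are illustrative):

    import numpy as np

    old_params = np.array([0.50, 1.25, -2.0])
    new_params = np.array([0.50, 1.25, -2.000001])

    # True once no parameter moved by more than the tolerance in the last step.
    print(np.allclose(old_params, new_params, atol=1e-5))  # True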

neural_network/input_data.py

Lines changed: 16 additions & 16 deletions

@@ -22,7 +22,7 @@
import typing
import urllib

-import numpy
+import numpy as np
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
@@ -39,8 +39,8 @@ class _Datasets(typing.NamedTuple):


def _read32(bytestream):
-    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
-    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
+    dt = np.dtype(np.uint32).newbyteorder(">")
+    return np.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
@@ -68,7 +68,7 @@ def _extract_images(f):
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
-        data = numpy.frombuffer(buf, dtype=numpy.uint8)
+        data = np.frombuffer(buf, dtype=np.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data

@@ -77,8 +77,8 @@ def _extract_images(f):
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
-    index_offset = numpy.arange(num_labels) * num_classes
-    labels_one_hot = numpy.zeros((num_labels, num_classes))
+    index_offset = np.arange(num_labels) * num_classes
+    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

@@ -107,7 +107,7 @@ def _extract_labels(f, one_hot=False, num_classes=10):
        )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
-        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
+        labels = np.frombuffer(buf, dtype=np.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
@@ -153,7 +153,7 @@ def __init__(
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
-        numpy.random.seed(seed1 if seed is None else seed2)
+        np.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
@@ -175,8 +175,8 @@ def __init__(
            )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
-                images = images.astype(numpy.float32)
-                images = numpy.multiply(images, 1.0 / 255.0)
+                images = images.astype(np.float32)
+                images = np.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
@@ -210,8 +210,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True):
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
-            perm0 = numpy.arange(self._num_examples)
-            numpy.random.shuffle(perm0)
+            perm0 = np.arange(self._num_examples)
+            np.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
@@ -224,8 +224,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True):
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
-                perm = numpy.arange(self._num_examples)
-                numpy.random.shuffle(perm)
+                perm = np.arange(self._num_examples)
+                np.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
@@ -235,8 +235,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True):
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
-                numpy.concatenate((images_rest_part, images_new_part), axis=0),
-                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
+                np.concatenate((images_rest_part, images_new_part), axis=0),
+                np.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
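_dense_to_one_hot above is a compact use of numpy flat indexing: it writes a 1 at position (row, label) for every row in one shot. A minimal standalone sketch of the same idea (re-implemented here for illustration):

    import numpy as np

    def dense_to_one_hot(labels_dense: np.ndarray, num_classes: int) -> np.ndarray:
        """Convert integer class labels to one-hot row vectors."""
        num_labels = labels_dense.shape[0]
        index_offset = np.arange(num_labels) * num_classes  # start of each row in the flat view
        labels_one_hot = np.zeros((num_labels, num_classes))
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        return labels_one_hot

    print(dense_to_one_hot(np.array([1, 0, 2]), num_classes=3))
    # [[0. 1. 0.]
    #  [1. 0. 0.]
    #  [0. 0. 1.]]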

neural_network/two_hidden_layers_neural_network.py

Lines changed: 41 additions & 43 deletions

@@ -5,11 +5,11 @@
- https://en.wikipedia.org/wiki/Feedforward_neural_network (Feedforward)
"""

-import numpy
+import numpy as np


class TwoHiddenLayerNeuralNetwork:
-    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
+    def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None:
        """
        This function initializes the TwoHiddenLayerNeuralNetwork class with random
        weights for every layer and initializes predicted output with zeroes.
@@ -28,30 +28,28 @@ def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> N
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
-        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
+        self.input_layer_and_first_hidden_layer_weights = np.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
-        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
-            4, 3
-        )
+        self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
-        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
+        self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
-        self.predicted_output = numpy.zeros(output_array.shape)
+        self.predicted_output = np.zeros(output_array.shape)

-    def feedforward(self) -> numpy.ndarray:
+    def feedforward(self) -> np.ndarray:
        """
        The information moves in only one direction i.e. forward from the input nodes,
        through the two hidden nodes and to the output nodes.
@@ -60,24 +58,24 @@ def feedforward(self) -> numpy.ndarray:
        Return layer_between_second_hidden_layer_and_output
        (i.e the last layer of the neural network).

-        >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
-        >>> output_val = numpy.array(([0], [0], [0]), dtype=float)
+        >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
+        >>> output_val = np.array(([0], [0], [0]), dtype=float)
        >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
        >>> res = nn.feedforward()
-        >>> array_sum = numpy.sum(res)
-        >>> numpy.isnan(array_sum)
+        >>> array_sum = np.sum(res)
+        >>> np.isnan(array_sum)
        False
        """
        # Layer_between_input_and_first_hidden_layer is the layer connecting the
        # input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
-            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
+            np.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
-            numpy.dot(
+            np.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
@@ -86,7 +84,7 @@ def feedforward(self) -> numpy.ndarray:
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
-            numpy.dot(
+            np.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
@@ -100,8 +98,8 @@ def back_propagation(self) -> None:
        error rate obtained in the previous epoch (i.e., iteration).
        Updation is done using derivative of sogmoid activation function.

-        >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
-        >>> output_val = numpy.array(([0], [0], [0]), dtype=float)
+        >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
+        >>> output_val = np.array(([0], [0], [0]), dtype=float)
        >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
        >>> res = nn.feedforward()
        >>> nn.back_propagation()
@@ -110,15 +108,15 @@ def back_propagation(self) -> None:
        False
        """

-        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
+        updated_second_hidden_layer_and_output_layer_weights = np.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
-        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
+        updated_first_hidden_layer_and_second_hidden_layer_weights = np.dot(
            self.layer_between_input_and_first_hidden_layer.T,
-            numpy.dot(
+            np.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
@@ -128,10 +126,10 @@ def back_propagation(self) -> None:
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
-        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
+        updated_input_layer_and_first_hidden_layer_weights = np.dot(
            self.input_array.T,
-            numpy.dot(
-                numpy.dot(
+            np.dot(
+                np.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
@@ -155,7 +153,7 @@ def back_propagation(self) -> None:
            updated_second_hidden_layer_and_output_layer_weights
        )

-    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
+    def train(self, output: np.ndarray, iterations: int, give_loss: bool) -> None:
        """
        Performs the feedforwarding and back propagation process for the
        given number of iterations.
@@ -166,8 +164,8 @@ def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None
        give_loss : boolean value, If True then prints loss for each iteration,
        If False then nothing is printed

-        >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
-        >>> output_val = numpy.array(([0], [1], [1]), dtype=float)
+        >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
+        >>> output_val = np.array(([0], [1], [1]), dtype=float)
        >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
        >>> first_iteration_weights = nn.feedforward()
        >>> nn.back_propagation()
@@ -179,10 +177,10 @@ def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None
            self.output = self.feedforward()
            self.back_propagation()
            if give_loss:
-                loss = numpy.mean(numpy.square(output - self.feedforward()))
+                loss = np.mean(np.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

-    def predict(self, input_arr: numpy.ndarray) -> int:
+    def predict(self, input_arr: np.ndarray) -> int:
        """
        Predict's the output for the given input values using
        the trained neural network.
@@ -192,8 +190,8 @@ def predict(self, input_arr: numpy.ndarray) -> int:
        than the threshold value else returns 0,
        as the real output values are in binary.

-        >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
-        >>> output_val = numpy.array(([0], [1], [1]), dtype=float)
+        >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
+        >>> output_val = np.array(([0], [1], [1]), dtype=float)
        >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
        >>> nn.train(output_val, 1000, False)
        >>> nn.predict([0, 1, 0]) in (0, 1)
@@ -204,18 +202,18 @@ def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
-            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
+            np.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
-            numpy.dot(
+            np.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
-            numpy.dot(
+            np.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
@@ -224,26 +222,26 @@ def predict(self, input_arr: numpy.ndarray) -> int:
        return int((self.layer_between_second_hidden_layer_and_output > 0.6)[0])


-def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
+def sigmoid(value: np.ndarray) -> np.ndarray:
    """
    Applies sigmoid activation function.

    return normalized values

-    >>> sigmoid(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64))
+    >>> sigmoid(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64))
    array([[0.73105858, 0.5       , 0.88079708],
           [0.73105858, 0.5       , 0.5       ]])
    """
-    return 1 / (1 + numpy.exp(-value))
+    return 1 / (1 + np.exp(-value))


-def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
+def sigmoid_derivative(value: np.ndarray) -> np.ndarray:
    """
    Provides the derivative value of the sigmoid function.

    returns derivative of the sigmoid value

-    >>> sigmoid_derivative(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64))
+    >>> sigmoid_derivative(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64))
    array([[ 0.,  0., -2.],
           [ 0.,  0.,  0.]])
    """
@@ -264,7 +262,7 @@ def example() -> int:
    True
    """
    # Input values.
-    test_input = numpy.array(
+    test_input = np.array(
        (
            [0, 0, 0],
            [0, 0, 1],
@@ -275,11 +273,11 @@ def example() -> int:
            [1, 1, 0],
            [1, 1, 1],
        ),
-        dtype=numpy.float64,
+        dtype=np.float64,
    )

    # True output values for the given input values.
-    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
+    output = np.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=np.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
@@ -290,7 +288,7 @@ def example() -> int:
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

-    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
+    return neural_network.predict(np.array(([1, 1, 1]), dtype=np.float64))


if __name__ == "__main__":
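The doctests in train and predict already sketch end-to-end usage; the same flow under the new alias looks as follows (a minimal sketch assuming TwoHiddenLayerNeuralNetwork is in scope, with input and output values taken from those doctests):

    import numpy as np

    # Tiny training set from the doctests in train()/predict().
    input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
    output_val = np.array(([0], [1], [1]), dtype=float)

    nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
    nn.train(output_val, 1000, False)       # feedforward + back propagation, 1000 iterations
    print(nn.predict([0, 1, 0]) in (0, 1))  # True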

pyproject.toml

Lines changed: 0 additions & 1 deletion

@@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule
  "EM101",  # Exception must not use a string literal, assign to variable first
  "EXE001",  # Shebang is present but file is not executable" -- FIX ME
  "G004",  # Logging statement uses f-string
-  "ICN001",  # `matplotlib.pyplot` should be imported as `plt` -- FIX ME
  "INP001",  # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. -- FIX ME
  "NPY002",  # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME
  "PGH003",  # Use specific rule codes when ignoring type issues -- FIX ME
