@@ -6,12 +6,12 @@ class SimpleANN:
     Simple Artificial Neural Network (ANN)
 
     - Feedforward Neural Network with 1 hidden layer and Sigmoid activation.
-    - Uses Gradient Descent for backpropagation and Mean Squared Error (MSE)
-    as the loss function.
+    - Uses Gradient Descent for backpropagation and Mean Squared Error (MSE)
+      as the loss function.
     - Example demonstrates solving the XOR problem.
     """
 
-    def __init__(self, input_size: int, hidden_size: int, output_size: int,
+    def __init__(self, input_size: int, hidden_size: int, output_size: int,
                  learning_rate: float = 0.1) -> None:
         """
         Initialize the neural network with random weights and biases.
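For reference, a minimal sketch of how the class documented above is meant to be driven on the XOR task its docstring mentions. The module name simple_ann is a guess and the 4x2 input layout is an assumption; only the SimpleANN API itself comes from this PR.

    import numpy as np

    from simple_ann import SimpleANN  # hypothetical module name

    inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])  # XOR truth table
    targets = np.array([[0], [1], [1], [0]])

    ann = SimpleANN(input_size=2, hidden_size=2, output_size=1, learning_rate=0.1)
    ann.train(inputs, targets, epochs=10000)
    print(ann.predict(inputs))  # each row should drift toward [0, 1, 1, 0]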
@@ -63,7 +63,7 @@ def sigmoid_derivative(self, sigmoid_output: np.ndarray) -> np.ndarray:
 
         Example:
             >>> ann = SimpleANN(2, 2, 1)
-            >>> output = ann.sigmoid(np.array([0.5]))
+            >>> output = ann.sigmoid(np.array([0]))  # Use input 0 for testing
             >>> ann.sigmoid_derivative(output)
             array([0.25])
         """
@@ -85,16 +85,16 @@ def feedforward(self, inputs: np.ndarray) -> np.ndarray:
             >>> ann.feedforward(inputs).shape
             (2, 1)
         """
-        self.hidden_input = (np.dot(inputs, self.weights_input_hidden) +
-                             self.bias_hidden)
+        self.hidden_input = np.dot(inputs, self.weights_input_hidden) + self.bias_hidden
         self.hidden_output = self.sigmoid(self.hidden_input)
-        self.final_input = (np.dot(self.hidden_output, self.weights_hidden_output) +
-                            self.bias_output)
+        self.final_input = (
+            np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output
+        )
         self.final_output = self.sigmoid(self.final_input)
         return self.final_output
 
-    def backpropagation(self, inputs: np.ndarray, targets: np.ndarray,
-                        outputs: np.ndarray) -> None:
+    def backpropagation(self, inputs: np.ndarray, targets: np.ndarray,
+                        outputs: np.ndarray) -> None:
         """
         Perform backpropagation to adjust the weights and biases.
 
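The feedforward edits are purely stylistic; the matrix shapes line up the same way before and after. A standalone sketch with the sizes from the doctest (batch = 2, input_size = 2, hidden_size = 2, output_size = 1); the variable names here are mine, not the PR's:

    import numpy as np

    def sigmoid(x: np.ndarray) -> np.ndarray:
        return 1 / (1 + np.exp(-x))

    rng = np.random.default_rng(0)
    inputs = rng.random((2, 2))        # (batch, input_size)
    w_in_hidden = rng.random((2, 2))   # (input_size, hidden_size)
    b_hidden = np.zeros((1, 2))
    w_hidden_out = rng.random((2, 1))  # (hidden_size, output_size)
    b_out = np.zeros((1, 1))

    hidden = sigmoid(np.dot(inputs, w_in_hidden) + b_hidden)  # (2, 2)
    final = sigmoid(np.dot(hidden, w_hidden_out) + b_out)     # (2, 1)
    print(final.shape)  # (2, 1), matching the feedforward doctest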
@@ -115,36 +115,41 @@ def backpropagation(self, inputs: np.ndarray, targets: np.ndarray,
         hidden_error = output_gradient.dot(self.weights_hidden_output.T)
         hidden_gradient = hidden_error * self.sigmoid_derivative(self.hidden_output)
 
-        self.weights_hidden_output += (self.hidden_output.T.dot(output_gradient) *
-                                       self.learning_rate)
-        self.bias_output += (np.sum(output_gradient, axis=0, keepdims=True) *
-                             self.learning_rate)
-
-        self.weights_input_hidden += (inputs.T.dot(hidden_gradient) *
-                                      self.learning_rate)
-        self.bias_hidden += (np.sum(hidden_gradient, axis=0, keepdims=True) *
-                             self.learning_rate)
-
-    def train(self, inputs: np.ndarray, targets: np.ndarray,
-              epochs: int = 10000) -> None:
+        self.weights_hidden_output += (
+            self.hidden_output.T.dot(output_gradient) * self.learning_rate
+        )
+        self.bias_output += (
+            np.sum(output_gradient, axis=0, keepdims=True) * self.learning_rate
+        )
+
+        self.weights_input_hidden += (
+            inputs.T.dot(hidden_gradient) * self.learning_rate
+        )
+        self.bias_hidden += (
+            np.sum(hidden_gradient, axis=0, keepdims=True) * self.learning_rate
+        )
+
+    def train(self, inputs: np.ndarray, targets: np.ndarray,
+              epochs: int = 10000, verbose: bool = False) -> None:
         """
         Train the neural network on the given input and target data.
 
         Args:
             inputs (ndarray): Input features for training.
             targets (ndarray): True labels for training.
             epochs (int): Number of training iterations.
+            verbose (bool): Whether to print loss every 1000 epochs.
 
         Example:
             >>> ann = SimpleANN(2, 2, 1)
             >>> inputs = np.array([[0, 0], [1, 1]])
             >>> targets = np.array([[0], [1]])
-            >>> ann.train(inputs, targets, epochs=1)
+            >>> ann.train(inputs, targets, epochs=1, verbose=False)
         """
         for epoch in range(epochs):
             outputs = self.feedforward(inputs)
             self.backpropagation(inputs, targets, outputs)
-            if epoch % 1000 == 0:
+            if verbose and epoch % 1000 == 0:
                 loss = np.mean(np.square(targets - outputs))
                 print(f"Epoch {epoch}, Loss: {loss}")
 
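The verbose gate also repairs the train doctest: epoch 0 always satisfies epoch % 1000 == 0, so the old code printed "Epoch 0, Loss: ..." even for epochs=1, while the doctest expects no output. A minimal before/after sketch (the module name simple_ann is hypothetical):

    import numpy as np

    from simple_ann import SimpleANN  # hypothetical module name

    ann = SimpleANN(2, 2, 1)
    inputs = np.array([[0, 0], [1, 1]])
    targets = np.array([[0], [1]])

    ann.train(inputs, targets, epochs=1)                # silent by default now
    ann.train(inputs, targets, epochs=1, verbose=True)  # prints "Epoch 0, Loss: ..."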
@@ -165,3 +170,8 @@ def predict(self, inputs: np.ndarray) -> np.ndarray:
             (2, 1)
         """
         return self.feedforward(inputs)
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
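With the new guard in place, the doctests can be exercised directly from the command line (the filename simple_ann.py is hypothetical):

    python simple_ann.py                  # doctest.testmod() is silent on success
    python -m doctest simple_ann.py -v    # same tests, with per-example output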