Skip to content

Commit 6cbd4ff

Browse files
Resolved formatting issues
1 parent 7d710c8 commit 6cbd4ff

File tree

2 files changed

+16
-14
lines changed

2 files changed

+16
-14
lines changed

machine_learning/frequent_pattern_growth.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -240,7 +240,9 @@ def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None:
240240
ascend_tree(leaf_node.parent, prefix_path)
241241

242242

243-
def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: # noqa: ARG001
243+
def find_prefix_path(
244+
base_pat: frozenset, tree_node: TreeNode | None
245+
) -> dict: # noqa: ARG001
244246
"""
245247
Find the conditional pattern base for a given base pattern.
246248

machine_learning/gradient_descent_momentum.py

+13-13
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
"""
2-
Implementation of gradient descent algorithm using momentum for minimizing cost of a linear hypothesis
3-
function.
2+
Implementation of gradient descent algorithm using momentum
3+
for minimizing cost of a linear hypothesis function.
44
"""
55

66
import numpy as np
@@ -25,23 +25,23 @@
2525

2626
def _error(example_no, data_set="train"):
2727
"""
28-
Calculate the error (difference between predicted and actual output) for a given example.
28+
Calculate the error for a given example.
2929
Args:
3030
example_no (int): Index of the example in the dataset.
3131
data_set (str): The dataset to use, either "train" or "test".
3232
Returns:
33-
float: The difference between the predicted output and the actual output.
33+
float: The difference between predicted output and actual output.
3434
"""
35-
return calculate_hypothesis_value(example_no, data_set) - output(
36-
example_no, data_set
37-
)
35+
hypo_value = calculate_hypothesis_value(example_no, data_set)
36+
output_value = output(example_no, data_set)
37+
return hypo_value - output_value
3838

3939

4040
def _hypothesis_value(data_input_tuple):
4141
"""
4242
Compute the hypothesis value (predicted output) for a given input tuple.
4343
Args:
44-
data_input_tuple (tuple): The input tuple (features) for the example.
44+
data_input_tuple: The input tuple (features) for the example.
4545
Returns:
4646
float: The hypothesis value for the given input.
4747
"""
@@ -54,7 +54,8 @@ def _hypothesis_value(data_input_tuple):
5454

5555
def output(example_no, data_set):
5656
"""
57-
Retrieve the actual output (label) for a given example from the specified dataset.
57+
Retrieve the actual output (label) for a given example
58+
from the specified dataset.
5859
Args:
5960
example_no (int): Index of the example in the dataset.
6061
data_set (str): The dataset to use, either "train" or "test".
@@ -89,7 +90,8 @@ def summation_of_cost_derivative(index, end=m):
8990
Calculate the summation of the cost derivative for a given index.
9091
Args:
9192
index (int): The index of the parameter for which the derivative is calculated.
92-
end (int): The number of examples to consider (defaults to the size of the training set).
93+
end (int): The number of examples to consider
94+
(defaults to the size of the training set).
9395
Returns:
9496
float: The summation of the cost derivatives for the given parameter.
9597
"""
@@ -152,7 +154,5 @@ def test_gradient_descent():
152154

153155
if __name__ == "__main__":
154156
run_gradient_descent_with_momentum()
155-
print(
156-
"\nTesting gradient descent with momentum for a linear hypothesis function.\n"
157-
)
157+
print("\nTesting gradient descent momentum for a linear hypothesis function.\n")
158158
test_gradient_descent()

0 commit comments

Comments
 (0)