
Commit 106b6c7
Updated code as per PR feedback 3
Committed Feb 3, 2025
1 parent 2722754 · commit 106b6c7

1 file changed: +15 −14 lines changed

machine_learning/ridge_regression.py (15 additions & 14 deletions)
@@ -25,9 +25,10 @@ def ridge_cost_function(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha:
     @returns: The computed cost value
     """
     m = len(y)
-    predictions = np.dot(X, theta)
-    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2)
-    cost += (alpha / 2) * np.sum(theta[1:] ** 2)
+    predictions = np.dot(x, theta)
+    cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + \
+        (alpha / 2) * np.sum(theta[1:] ** 2)
+
     return cost
 
 def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alpha: float, learning_rate: float, max_iterations: int) -> np.ndarray:
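For reference, the merged expression is the standard ridge cost, cost = (1 / (2m)) * sum((x·theta − y)²) + (alpha / 2) * sum(theta[1:]²), with the intercept theta[0] left unpenalized. A minimal, self-contained sketch on hypothetical toy arrays (not values from the dataset), checking that the new single-expression form gives the same value as the old two-statement form:

```python
import numpy as np

# Hypothetical toy design matrix (bias column + 1 feature), targets, and parameters
x = np.array([[1.0, 0.5], [1.0, -1.2], [1.0, 2.0]])
y = np.array([1.0, 0.0, 2.5])
theta = np.array([0.1, 0.3])
alpha = 0.1
m = len(y)

predictions = np.dot(x, theta)

# Old form: penalty accumulated in a second statement
cost_old = (1 / (2 * m)) * np.sum((predictions - y) ** 2)
cost_old += (alpha / 2) * np.sum(theta[1:] ** 2)

# New form: one expression, as in the updated hunk
cost_new = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + \
    (alpha / 2) * np.sum(theta[1:] ** 2)

assert np.isclose(cost_old, cost_new)  # the refactor does not change the result
```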
@@ -46,16 +47,16 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alph
     m = len(y)
 
     for iteration in range(max_iterations):
-        predictions = np.dot(X, theta)
+        predictions = np.dot(x, theta)
         error = predictions - y
 
         # calculate the gradient
-        gradient = (1 / m) * np.dot(X.T, error)
+        gradient = (1 / m) * np.dot(x.T, error)
         gradient[1:] += (alpha / m) * theta[1:]
         theta -= learning_rate * gradient
 
         if iteration % 100 == 0:
-            cost = ridge_cost_function(X, y, theta, alpha)
+            cost = ridge_cost_function(x, y, theta, alpha)
             print(f"Iteration {iteration}, Cost: {cost}")
 
     return theta
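For reference, the loop body above performs one gradient-descent step for the ridge objective: gradient = (1 / m) * xᵀ(x·theta − y), with an extra (alpha / m) * theta term on every coefficient except the intercept, followed by theta −= learning_rate * gradient. A minimal sketch of a single update on hypothetical toy arrays, mirroring the renamed lowercase x:

```python
import numpy as np

# Hypothetical toy design matrix (bias column + 1 feature) and targets
x = np.array([[1.0, 0.5], [1.0, -1.2], [1.0, 2.0]])
y = np.array([1.0, 0.0, 2.5])
theta = np.zeros(x.shape[1])
alpha, learning_rate = 0.1, 0.01
m = len(y)

# One iteration of the loop body shown in the diff
predictions = np.dot(x, theta)
error = predictions - y
gradient = (1 / m) * np.dot(x.T, error)
gradient[1:] += (alpha / m) * theta[1:]   # intercept (index 0) is not regularized
theta -= learning_rate * gradient
print(theta)
```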
@@ -68,28 +69,28 @@ def ridge_gradient_descent(x: np.ndarray, y: np.ndarray, theta: np.ndarray, alph
 
 # Load California Housing dataset
 california_housing = datasets.fetch_california_housing()
-X = california_housing.data[:, :2]  # 2 features for simplicity
+x = california_housing.data[:, :2]  # 2 features for simplicity
 y = california_housing.target
-X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
+x = (x - np.mean(x, axis=0)) / np.std(x, axis=0)
 
 # Add a bias column (intercept) to X
-X = np.c_[np.ones(X.shape[0]), X]
+x = np.c_[np.ones(x.shape[0]), x]
 
 # Initialize parameters (theta)
-theta_initial = np.zeros(X.shape[1])
+theta_initial = np.zeros(x.shape[1])
 
 # Set hyperparameters
 alpha = 0.1
 learning_rate = 0.01
 max_iterations = 1000
 
-optimized_theta = ridge_gradient_descent(X, y, theta_initial, alpha, learning_rate, max_iterations)
+optimized_theta = ridge_gradient_descent(x, y, theta_initial, alpha, learning_rate, max_iterations)
 print(f"Optimized theta: {optimized_theta}")
 
 # Prediction
-def predict(X, theta):
-    return np.dot(X, theta)
-y_pred = predict(X, optimized_theta)
+def predict(x, theta):
+    return np.dot(x, theta)
+y_pred = predict(x, optimized_theta)
 
 # Plotting the results (here we visualize predicted vs actual values)
 plt.figure(figsize=(10, 6))
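For reference, the driver section above standardizes the two selected features, prepends a bias column of ones, and then fits theta by gradient descent before predicting. A minimal sketch of just that preprocessing on a hypothetical feature matrix (not the California Housing data), showing the resulting design-matrix shape:

```python
import numpy as np

# Hypothetical raw feature matrix: 3 samples, 2 features
raw = np.array([[8.3, 41.0], [8.3, 21.0], [7.2, 52.0]])

# Standardize each column to zero mean and unit variance, as in the script
x = (raw - np.mean(raw, axis=0)) / np.std(raw, axis=0)

# Prepend the intercept column of ones
x = np.c_[np.ones(x.shape[0]), x]

print(x.shape)                                  # (3, 3): bias column + 2 standardized features
print(np.allclose(x[:, 1:].mean(axis=0), 0.0))  # True: feature columns are centered
```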
