import numpy as np
import requests


def collect_dataset():
    """Collect the CS:GO dataset.

    The dataset contains the ADR vs Rating of players.
    :return : dataset obtained from the link, as a matrix
    """
    response = requests.get(
        "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
        "master/Week1/ADRvsRating.csv",
        timeout=10,
    )
    response.raise_for_status()  # Fail loudly on HTTP errors
    lines = response.text.splitlines()
    data = [line.split(",") for line in lines]
    data.pop(0)  # Remove the header row (column labels)
    dataset = np.matrix(data)
    return dataset
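
# A hedged usage sketch: the returned matrix holds strings, one row per
# player; the column order (ADR first, Rating second) is assumed from the
# CSV name and from how main() slices the data.
#     dataset = collect_dataset()
#     dataset.shape  # (n_players, 2)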


def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta, lambda_reg):
    """Run one step of steepest gradient descent and update the feature vector.

    :param data_x : contains the dataset
    :param data_y : contains the output associated with each data entry
    :param len_data : length of the data
    :param alpha : learning rate of the model
    :param theta : feature vector (weights for our model)
    :param lambda_reg : regularization parameter
    :return : updated features, computed as
              curr_features - alpha * gradient(w.r.t. feature)
    """
    n = len_data

    # Prediction error for every sample: theta . x^T - y^T, shape (1, n)
    prod = np.dot(theta, data_x.transpose())
    prod -= data_y.transpose()
    sum_grad = np.dot(prod, data_x)

    # Add the ridge (L2) penalty to the gradient, skipping the bias term
    theta_regularized = np.copy(theta)
    theta_regularized[0, 0] = 0  # Do not regularize the bias term
    sum_grad += lambda_reg * theta_regularized

    theta = theta - (alpha / n) * sum_grad
    return theta
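
# A minimal worked example (illustrative numbers, not from the CSV above):
# two samples, zero initial theta, alpha = 0.01, lambda_reg = 0.5. The
# penalty contributes nothing here because theta starts at zero.
#     x = np.matrix([[1.0, 2.0], [1.0, 3.0]])  # bias column + one feature
#     y = np.matrix([[4.0], [6.0]])
#     run_steep_gradient_descent(x, y, 2, 0.01, np.zeros((1, 2)), 0.5)
#     # -> roughly matrix([[0.05, 0.13]])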


def sum_of_square_error(data_x, data_y, len_data, theta, lambda_reg):
    """Return the regularized sum-of-squares error.

    :param data_x : contains our dataset
    :param data_y : contains the output (result vector)
    :param len_data : length of the dataset
    :param theta : contains the feature vector
    :param lambda_reg : regularization parameter
    :return : sum of square error computed from the given features
    """
    prod = np.dot(theta, data_x.transpose())
    prod -= data_y.transpose()
    sum_elem = np.sum(np.square(prod))

    # Add the ridge penalty to the cost, excluding the bias term
    regularization_term = lambda_reg * np.sum(np.square(theta[:, 1:]))
    error = (sum_elem / (2 * len_data)) + (regularization_term / (2 * len_data))
    return error
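
# Worked check with the same toy data and a zero theta: the squared residuals
# are 4**2 + 6**2 = 52, the penalty term vanishes, and the cost is
# 52 / (2 * 2) = 13.0.
#     sum_of_square_error(x, y, 2, np.zeros((1, 2)), 0.5)  # -> 13.0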


def run_ridge_regression(data_x, data_y, lambda_reg=1.0):
    """Implement ridge regression over the dataset.

    :param data_x : contains our dataset
    :param data_y : contains the output (result vector)
    :param lambda_reg : regularization parameter
    :return : feature vector for the line of best fit
    """
    iterations = 100000
    alpha = 0.0001550

    no_features = data_x.shape[1]
    len_data = data_x.shape[0]

    theta = np.zeros((1, no_features))

    for i in range(iterations):
        theta = run_steep_gradient_descent(
            data_x, data_y, len_data, alpha, theta, lambda_reg
        )
        error = sum_of_square_error(data_x, data_y, len_data, theta, lambda_reg)
        print(f"At Iteration {i + 1} - Error is {error:.5f}")

    return theta
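
# Typical usage (a sketch; lambda_reg = 10.0 is an arbitrary example value):
#     theta = run_ridge_regression(data_x, data_y, lambda_reg=10.0)
# Larger lambda_reg shrinks the non-bias weights toward zero; lambda_reg = 0
# reduces each update to plain least-squares gradient descent.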


def mean_absolute_error(predicted_y, original_y):
    """Return the mean absolute error between predictions and ground truth.

    :param predicted_y : contains the output of the prediction (result vector)
    :param original_y : contains the values of the expected outcome
    :return : mean absolute error computed from the given predictions
    """
    total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y))
    return total / len(original_y)
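
# Quick sanity check: each prediction is off by exactly 1, so the MAE is 1.0.
#     mean_absolute_error([2.0, 4.0], [3.0, 5.0])  # -> 1.0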


def main():
    """Driver function."""
    data = collect_dataset()

    len_data = data.shape[0]
    # Prepend a column of ones so theta[0, 0] acts as the bias (intercept)
    data_x = np.c_[np.ones(len_data), data[:, :-1]].astype(float)
    data_y = data[:, -1].astype(float)

    lambda_reg = 1.0  # Regularization parameter; tune for your data
    theta = run_ridge_regression(data_x, data_y, lambda_reg)

    len_result = theta.shape[1]
    print("Resultant Feature vector : ")
    for i in range(len_result):
        print(f"{theta[0, i]:.5f}")


if __name__ == "__main__":
    main()