1
1
import numpy as np
2
2
import requests
3
3
4
+
4
5
def collect_dataset ():
5
6
"""Collect dataset of CSGO (ADR vs Rating of a Player)"""
6
7
response = requests .get (
@@ -10,35 +11,40 @@ def collect_dataset():
10
11
data = np .loadtxt (response .text .splitlines ()[1 :], delimiter = "," ) # Skip the header
11
12
return data
12
13
14
+
13
15
def normalize_features(data):
    """Normalize feature columns (all but the last) to mean 0 and variance 1.

    Operates in place on ``data`` and also returns it for convenience.
    The last column is left untouched (it holds the regression target).

    Fix: a constant feature column has standard deviation 0, and the
    original code divided by it, producing NaN/inf for that column.
    Zero stds are replaced with 1 so constant columns normalize to all
    zeros instead.

    :param data: 2-D ndarray, shape (n_samples, n_features + 1); the
        final column is the target and is not normalized.
    :return: the same array, with feature columns standardized in place.
    """
    features = data[:, :-1]
    means = np.mean(features, axis=0)
    stds = np.std(features, axis=0)
    # Guard against division by zero for constant (zero-variance) columns.
    stds = np.where(stds == 0, 1.0, stds)
    data[:, :-1] = (features - means) / stds
    return data
19
21
22
+
20
23
def run_gradient_descent(data_x, data_y, alpha=0.01, iterations=1000, batch_size=32):
    """Run mini-batch gradient descent for linear regression.

    Fix: ``np.random.choice(..., replace=False)`` raises ``ValueError``
    when ``batch_size`` exceeds the number of samples (e.g. a dataset
    with fewer than 32 rows under the default). The batch size is now
    clamped to the dataset length, degrading gracefully to full-batch
    gradient descent.

    :param data_x: 2-D ndarray, shape (n_samples, n_features).
    :param data_y: 1-D ndarray, shape (n_samples,) of targets.
    :param alpha: learning rate.
    :param iterations: number of update steps.
    :param batch_size: samples per mini-batch (clamped to n_samples).
    :return: 1-D ndarray, shape (n_features,) — the learned theta.
    """
    len_data, no_features = data_x.shape
    theta = np.zeros(no_features)
    # Sampling without replacement requires batch_size <= n_samples.
    batch_size = min(batch_size, len_data)

    for i in range(iterations):
        # Randomly sample a mini-batch of row indices without replacement.
        indices = np.random.choice(len_data, batch_size, replace=False)
        x_batch = data_x[indices]
        y_batch = data_y[indices]

        predictions = x_batch @ theta  # Vectorized predictions
        errors = predictions - y_batch

        gradient = (1 / batch_size) * (x_batch.T @ errors)  # Vectorized gradient
        theta -= alpha * gradient  # Update theta

        if i % 100 == 0:  # Print error every 100 iterations
            error = np.mean(errors**2)  # Mean Squared Error
            print(f"Iteration {i}: MSE = {error:.5f}")

    return theta
41
46
47
+
42
48
def main ():
43
49
"""Driver function"""
44
50
data = collect_dataset ()
@@ -52,5 +58,6 @@ def main():
52
58
print ("Resultant Feature vector : " )
53
59
print (theta )
54
60
61
+
55
62
# Script entry point: run the driver only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
0 commit comments