Skip to content

Commit b143bfc

Browse files
authored
updated to lowercase
1 parent 51cf80c commit b143bfc

File tree

5 files changed

+151
-113
lines changed

5 files changed

+151
-113
lines changed

game_theory/best_response_dynamics.py

+26-27
Original file line numberDiff line numberDiff line change
@@ -1,27 +1,26 @@
import numpy as np


def best_response_dynamics(payoff_matrix_a, payoff_matrix_b, iterations=10):
    """Run pure-strategy best-response dynamics on a two-player bimatrix game.

    Args:
        payoff_matrix_a: (n, m) array of row player A's payoffs.
        payoff_matrix_b: (n, m) array of column player B's payoffs.
        iterations: number of alternating update rounds.

    Returns:
        Tuple ``(strategy_a, strategy_b)`` of one-hot numpy arrays — the
        pure strategies after the final round.  NOTE(review): best-response
        dynamics need not converge for every game; this simply reports the
        strategies reached after ``iterations`` rounds.
    """
    n = payoff_matrix_a.shape[0]
    m = payoff_matrix_a.shape[1]

    # Start both players from the uniform mixed strategy.
    strategy_a = np.ones(n) / n
    strategy_b = np.ones(m) / m

    for _ in range(iterations):
        # A best-responds to B's current strategy (argmax of expected payoff).
        response_a = np.argmax(payoff_matrix_a @ strategy_b)
        strategy_a = np.zeros(n)
        strategy_a[response_a] = 1

        # B best-responds to A's *updated* strategy; B's payoffs are read
        # column-wise, hence the transpose.
        response_b = np.argmax(payoff_matrix_b.T @ strategy_a)
        strategy_b = np.zeros(m)
        strategy_b[response_b] = 1

    return strategy_a, strategy_b


# Example usage
payoff_a = np.array([[3, 0], [5, 1]])
payoff_b = np.array([[2, 4], [0, 2]])
strategies = best_response_dynamics(payoff_a, payoff_b)
print("Final strategies:", strategies)

game_theory/fictitious_play.py

+33-33
Original file line numberDiff line numberDiff line change
@@ -1,33 +1,33 @@
import numpy as np


def fictitious_play(payoff_matrix_a, payoff_matrix_b, iterations=100):
    """Run fictitious play on a two-player bimatrix game.

    Each round, every player best-responds to the opponent's *empirical*
    (historical average) strategy.  Fix over the previous revision: the
    historical counts were accumulated but never consulted, which silently
    degenerated the routine into plain best-response dynamics.  Best
    responses are now taken against the accumulated counts — argmax is
    scale-invariant, so normalizing counts into frequencies is unnecessary.

    Args:
        payoff_matrix_a: (n, m) array of row player A's payoffs.
        payoff_matrix_b: (n, m) array of column player B's payoffs.
        iterations: number of rounds of play.

    Returns:
        Tuple ``(strategy_a, strategy_b)`` of one-hot numpy arrays — each
        player's final pure best response.
    """
    n = payoff_matrix_a.shape[0]
    m = payoff_matrix_a.shape[1]

    # Historical play counts and the initial (uniform) strategies.
    counts_a = np.zeros(n)
    counts_b = np.zeros(m)
    strategy_a = np.ones(n) / n
    strategy_b = np.ones(m) / m

    for _ in range(iterations):
        # Record this round's play in the empirical history.
        counts_a += strategy_a
        counts_b += strategy_b

        # Best-respond to the opponent's empirical play, not just the
        # latest move; argmax over counts equals argmax over frequencies.
        best_response_a = np.argmax(payoff_matrix_a @ counts_b)
        best_response_b = np.argmax(payoff_matrix_b.T @ counts_a)

        strategy_a = np.zeros(n)
        strategy_a[best_response_a] = 1
        strategy_b = np.zeros(m)
        strategy_b[best_response_b] = 1

    return strategy_a, strategy_b


# Example usage
payoff_a = np.array([[3, 0], [5, 1]])
payoff_b = np.array([[2, 4], [0, 2]])
strategies = fictitious_play(payoff_a, payoff_b)
print("Fictitious Play strategies:", strategies)

game_theory/minimax_algorithm.py

+29-29
Original file line numberDiff line numberDiff line change
@@ -1,29 +1,29 @@
def minimax(depth, node_index, is_maximizing_player, values, alpha, beta):
    """Alpha-beta pruned minimax over an implicit complete binary tree.

    ``values`` holds the leaf scores; the children of node ``k`` are
    ``2*k`` and ``2*k + 1``, and ``depth`` counts the levels remaining
    above the leaves.  ``alpha``/``beta`` carry the scores the maximizer/
    minimizer can already guarantee; a subtree is skipped as soon as
    beta <= alpha.
    """
    if depth == 0:
        # Leaf reached: score comes straight from the table.
        return values[node_index]

    first_child = node_index * 2
    if is_maximizing_player:
        best_value = float("-inf")
        for child in (first_child, first_child + 1):
            score = minimax(depth - 1, child, False, values, alpha, beta)
            best_value = max(best_value, score)
            alpha = max(alpha, best_value)
            if beta <= alpha:
                break  # Beta cut-off
        return best_value

    best_value = float("inf")
    for child in (first_child, first_child + 1):
        score = minimax(depth - 1, child, True, values, alpha, beta)
        best_value = min(best_value, score)
        beta = min(beta, best_value)
        if beta <= alpha:
            break  # Alpha cut-off
    return best_value


# Example usage
values = [3, 5, 2, 9, 0, 1, 8, 6]  # Leaf node values
depth = 3  # Depth of the game tree
result = minimax(depth, 0, True, values, float("-inf"), float("inf"))
print("The optimal value is:", result)

game_theory/nash_equilibrium.py

+33
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,35 @@
import numpy as np
from scipy.optimize import linprog


def _maximin_strategy(payoff_matrix):
    """Maximin (security) mixed strategy for the row player of ``payoff_matrix``.

    Solves the standard LP — minimize sum(x) subject to payoff.T @ x >= 1,
    x >= 0 — after shifting the payoffs to be strictly positive (a constant
    shift never changes the optimal strategy).  The normalized solution
    x / sum(x) is the maximin strategy.

    Raises:
        RuntimeError: if the linear program does not solve.
    """
    matrix = np.asarray(payoff_matrix, dtype=float)
    lowest = matrix.min()
    if lowest <= 0:
        # Shift so every entry is >= 1; required for the LP to be bounded.
        matrix = matrix + (1.0 - lowest)
    n_rows = matrix.shape[0]
    result = linprog(
        c=np.ones(n_rows),
        A_ub=-matrix.T,            # one constraint per opponent action
        b_ub=-np.ones(matrix.shape[1]),
        bounds=(0, None),
    )
    if not result.success:
        raise RuntimeError(f"linear program failed: {result.message}")
    x = result.x
    return x / x.sum()


def find_nash_equilibrium(payoff_matrix_a, payoff_matrix_b):
    """Return each player's maximin mixed strategy for a bimatrix game.

    Fixes over the previous revision: committed merge-conflict markers made
    the module unparseable, and the old LP (maximize the probability sum
    with mis-oriented constraints) was unbounded for the shipped example,
    so ``linprog`` returned no solution.

    NOTE(review): the profile returned is each player's *security*
    (maximin) strategy; it coincides with a Nash equilibrium for zero-sum
    games and for games with dominant strategies (as in the example), but
    not for every bimatrix game — confirm this suffices for callers.

    Args:
        payoff_matrix_a: (n, m) payoffs of row player A.
        payoff_matrix_b: (n, m) payoffs of column player B.

    Returns:
        Tuple ``(p_a, p_b)`` of probability vectors (length n and m).
    """
    p_a = _maximin_strategy(payoff_matrix_a)
    # B chooses columns, so B's own game is played on the transpose.
    p_b = _maximin_strategy(np.asarray(payoff_matrix_b).T)
    return p_a, p_b


# Example usage
payoff_a = np.array([[3, 0], [5, 1]])
payoff_b = np.array([[2, 4], [0, 2]])
equilibrium = find_nash_equilibrium(payoff_a, payoff_b)
print("Nash Equilibrium strategies:", equilibrium)

game_theory/shapley_value.py

+30-24
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,30 @@
import numpy as np
from math import factorial


def shapley_value(payoff_matrix):
    """Compute every player's Shapley value from a coalition-value table.

    Fixes over the previous revision: committed merge-conflict markers made
    the module unparseable; coalition bitmasks up to ``2**n - 1`` were used
    to index an n-row matrix (IndexError for the shipped 2x2 example); and
    the weight ``1 / bit_length(S)`` is not the Shapley weight, which is
    ``|S|! * (n - |S| - 1)! / n!``.

    Args:
        payoff_matrix: array of shape ``(2**n, n)``; entry ``[s, i]`` is
            player i's value of the coalition whose membership bitmask is
            ``s``.  The empty coalition (``s == 0``) is taken to be worth
            0, matching the original code's convention.

    Returns:
        Length-n numpy array of Shapley values.

    Raises:
        ValueError: if the row count is not ``2 ** (number of columns)``.
    """
    payoff_matrix = np.asarray(payoff_matrix, dtype=float)
    n = payoff_matrix.shape[1]  # number of players
    if payoff_matrix.shape[0] != 1 << n:
        raise ValueError(
            f"expected {1 << n} coalition rows for {n} players, "
            f"got {payoff_matrix.shape[0]}"
        )

    shapley_values = np.zeros(n)
    n_factorial = factorial(n)

    for i in range(n):
        for s in range(1 << n):  # All subsets of players
            if (s & (1 << i)) == 0:  # i not in S
                continue

            s_without_i = s & ~(1 << i)
            # Weight = probability that, in a uniformly random join order,
            # exactly the members of s_without_i precede player i.
            k = bin(s_without_i).count("1")
            weight = factorial(k) * factorial(n - k - 1) / n_factorial
            marginal_contribution = payoff_matrix[s][i] - (
                payoff_matrix[s_without_i][i] if s_without_i else 0.0
            )
            shapley_values[i] += weight * marginal_contribution

    return shapley_values


# Example usage: two players worth 2 and 3 on their own, no synergy.
payoff_matrix = np.array([[0, 0], [2, 2], [3, 3], [5, 5]])
shapley_vals = shapley_value(payoff_matrix)
print("Shapley Values:", shapley_vals)

0 commit comments

Comments
 (0)