From 849fc80a30e9e8585d0e8040dd5e77547c9901cf Mon Sep 17 00:00:00 2001
From: isatyamks
Date: Mon, 7 Oct 2024 21:50:34 +0530
Subject: [PATCH 01/16] Update computer_vision/README.md with OpenCV documentation link

---
 computer_vision/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/computer_vision/README.md b/computer_vision/README.md
index 1657128fd25e..9f935b142c08 100644
--- a/computer_vision/README.md
+++ b/computer_vision/README.md
@@ -8,4 +8,4 @@ Image processing and computer vision are a little different from each other. Ima
 While computer vision comes from modelling image processing using the techniques of machine learning, computer vision applies machine learning to recognize patterns for interpretation of images (much like the process of visual reasoning of human vision).
 
 *
-*
+*

From e5fbbdf02af69fc6c70b99da1e49dd7877114dc7 Mon Sep 17 00:00:00 2001
From: isatyamks
Date: Mon, 7 Oct 2024 22:06:26 +0530
Subject: [PATCH 02/16] Update computer_vision/README.md with additional resource link

---
 computer_vision/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/computer_vision/README.md b/computer_vision/README.md
index 9f935b142c08..1657128fd25e 100644
--- a/computer_vision/README.md
+++ b/computer_vision/README.md
@@ -8,4 +8,4 @@ Image processing and computer vision are a little different from each other. Ima
 While computer vision comes from modelling image processing using the techniques of machine learning, computer vision applies machine learning to recognize patterns for interpretation of images (much like the process of visual reasoning of human vision).
 
 *
-*
+*

From 9178473e83cfd5f75e2eeea9aa3e3c521b6fd30a Mon Sep 17 00:00:00 2001
From: isatyamks
Date: Mon, 7 Oct 2024 22:06:44 +0530
Subject: [PATCH 03/16] Add best_response_dynamics.py for implementing best response dynamics in game theory

---
 game_theory/best_response_dynamics.py | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)
 create mode 100644 game_theory/best_response_dynamics.py

diff --git a/game_theory/best_response_dynamics.py b/game_theory/best_response_dynamics.py
new file mode 100644
index 000000000000..62b9eb0d7138
--- /dev/null
+++ b/game_theory/best_response_dynamics.py
@@ -0,0 +1,26 @@
+def best_response_dynamics(payoff_matrix_a, payoff_matrix_b, iterations=10):
+    n = payoff_matrix_a.shape[0]
+    m = payoff_matrix_a.shape[1]
+
+    # Initialize strategies
+    strategy_a = np.ones(n) / n
+    strategy_b = np.ones(m) / m
+
+    for _ in range(iterations):
+        # Update strategy A
+        response_a = np.argmax(payoff_matrix_a @ strategy_b)
+        strategy_a = np.zeros(n)
+        strategy_a[response_a] = 1
+
+        # Update strategy B
+        response_b = np.argmax(payoff_matrix_b.T @ strategy_a)
+        strategy_b = np.zeros(m)
+        strategy_b[response_b] = 1
+
+    return strategy_a, strategy_b
+
+# Example usage
+payoff_a = np.array([[3, 0], [5, 1]])
+payoff_b = np.array([[2, 4], [0, 2]])
+strategies = best_response_dynamics(payoff_a, payoff_b)
+print("Final strategies:", strategies)

From cdf63204e48681cd4db7bce956492d9676fdcaa0 Mon Sep 17 00:00:00 2001
From: isatyamks
Date: Mon, 7 Oct 2024 22:07:06 +0530
Subject: [PATCH 04/16] Add game theory algorithms: fictitious play, minimax algorithm, and Nash equilibrium

---
 game_theory/fictitious_play.py   | 32 ++++++++++++++++++++++++++++++++
 game_theory/minimax_algorithm.py | 28 ++++++++++++++++++++++++++++
 game_theory/nash_equlibrium.py   | 30 ++++++++++++++++++++++++++++++
 3 files changed, 90 insertions(+)
 create mode 100644 game_theory/fictitious_play.py
 create mode 100644 game_theory/minimax_algorithm.py
 create mode 100644 game_theory/nash_equlibrium.py

diff --git a/game_theory/fictitious_play.py b/game_theory/fictitious_play.py
new file mode 100644
index 000000000000..d59fd73cb340
--- /dev/null
+++ b/game_theory/fictitious_play.py
@@ -0,0 +1,32 @@
+def fictitious_play(payoff_matrix_a, payoff_matrix_b, iterations=100):
+    n = payoff_matrix_a.shape[0]
+    m = payoff_matrix_a.shape[1]
+
+    # Initialize counts and strategies
+    counts_a = np.zeros(n)
+    counts_b = np.zeros(m)
+    strategy_a = np.ones(n) / n
+    strategy_b = np.ones(m) / m
+
+    for _ in range(iterations):
+        # Update counts
+        counts_a += strategy_a
+        counts_b += strategy_b
+
+        # Calculate best responses
+        best_response_a = np.argmax(payoff_matrix_a @ strategy_b)
+        best_response_b = np.argmax(payoff_matrix_b.T @ strategy_a)
+
+        # Update strategies
+        strategy_a = np.zeros(n)
+        strategy_a[best_response_a] = 1
+        strategy_b = np.zeros(m)
+        strategy_b[best_response_b] = 1
+
+    return strategy_a, strategy_b
+
+# Example usage
+payoff_a = np.array([[3, 0], [5, 1]])
+payoff_b = np.array([[2, 4], [0, 2]])
+strategies = fictitious_play(payoff_a, payoff_b)
+print("Fictitious Play strategies:", strategies)

diff --git a/game_theory/minimax_algorithm.py b/game_theory/minimax_algorithm.py
new file mode 100644
index 000000000000..4e7db0e701c3
--- /dev/null
+++ b/game_theory/minimax_algorithm.py
@@ -0,0 +1,28 @@
+def minimax(depth, node_index, is_maximizing_player, values, alpha, beta):
+    if depth == 0:
+        return values[node_index]
+
+    if is_maximizing_player:
+        best_value = float('-inf')
+        for i in range(2):  # Two children (0 and 1)
+            value = minimax(depth - 1, node_index * 2 + i, False, values, alpha, beta)
+            best_value = max(best_value, value)
+            alpha = max(alpha, best_value)
+            if beta <= alpha:
+                break  # Beta cut-off
+        return best_value
+    else:
+        best_value = float('inf')
+        for i in range(2):  # Two children (0 and 1)
+            value = minimax(depth - 1, node_index * 2 + i, True, values, alpha, beta)
+            best_value = min(best_value, value)
+            beta = min(beta, best_value)
+            if beta <= alpha:
+                break  # Alpha cut-off
+        return best_value
+
+# Example usage
+values = [3, 5, 2, 9, 0, 1, 8, 6]  # Leaf node values
+depth = 3  # Depth of the game tree
+result = minimax(depth, 0, True, values, float('-inf'), float('inf'))
+print("The optimal value is:", result)

diff --git a/game_theory/nash_equlibrium.py b/game_theory/nash_equlibrium.py
new file mode 100644
index 000000000000..8218aac97963
--- /dev/null
+++ b/game_theory/nash_equlibrium.py
@@ -0,0 +1,30 @@
+import numpy as np
+from scipy.optimize import linprog
+
+def find_nash_equilibrium(payoff_matrix_a, payoff_matrix_b):
+    n = payoff_matrix_a.shape[0]
+    m = payoff_matrix_a.shape[1]
+
+    # Solve for player A
+    c = [-1] * n  # Objective: maximize A's payoff
+    a_ub = -payoff_matrix_a  # A's constraints
+    b_ub = [-1] * m
+
+    result_a = linprog(c, A_ub=a_ub, b_ub=b_ub, bounds=(0, None))
+    p_a = result_a.x
+
+    # Solve for player B
+    c = [-1] * m  # Objective: maximize B's payoff
+    a_ub = -payoff_matrix_b.T  # B's constraints
+    b_ub = [-1] * n
+
+    result_b = linprog(c, A_ub=a_ub, b_ub=b_ub, bounds=(0, None))
+    p_b = result_b.x
+
+    return p_a, p_b
+
+# Example usage
+payoff_a = np.array([[3, 0], [5, 1]])
+payoff_b = np.array([[2, 4], [0, 2]])
+equilibrium = find_nash_equilibrium(payoff_a, payoff_b)
+print("Nash Equilibrium strategies:", equilibrium)

From 8362b29c5c94301a3d16e48fa28d5db4b7a49d54 Mon Sep 17 00:00:00 2001
From: isatyamks
Date: Mon, 7 Oct 2024 22:07:32 +0530
Subject: [PATCH 05/16] Add shapley_value.py for calculating Shapley values in game theory

---
 game_theory/shapley_value.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 game_theory/shapley_value.py

diff --git a/game_theory/shapley_value.py b/game_theory/shapley_value.py
new file mode 100644
index 000000000000..331cb3b1da2a
--- /dev/null
+++ b/game_theory/shapley_value.py
@@ -0,0 +1,27 @@
+import numpy as np
+
+def shapley_value(payoff_matrix):
+    n = payoff_matrix.shape[0]  # Number of players
+    shapley_values = np.zeros(n)  # Initialize Shapley values
+
+    # Iterate over each player
+    for i in range(n):
+        # Iterate over all subsets of players (from 0 to 2^n - 1)
+        for s in range(1 << n):  # All subsets of players
+            if (s & (1 << i)) == 0:  # If player i is not in subset S
+                continue
+
+            # Calculate the value of the subset S without player i
+            s_without_i = s & ~(1 << i)  # Remove player i from the subset
+            marginal_contribution = payoff_matrix[s][i] - (payoff_matrix[s_without_i][i] if s_without_i else 0)
+
+            # Count the size of the subset S
+            size_of_s = bin(s).count('1')  # Number of players in subset S
+            shapley_values[i] += marginal_contribution / (size_of_s * (n - size_of_s))  # Normalize by size of S
+
+    return shapley_values
+
+# Example usage
+payoff_matrix = np.array([[1, 2], [3, 4]])
+shapley_vals = shapley_value(payoff_matrix)
+print("Shapley Values:", shapley_vals)

From b4265350a202d5278cd47e36550566e7ff47e772 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 16:40:34 +0000
Subject: [PATCH 06/16] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 game_theory/best_response_dynamics.py |  1 +
 game_theory/fictitious_play.py        |  1 +
 game_theory/minimax_algorithm.py      |  7 ++++---
 game_theory/nash_equlibrium.py        |  2 ++
 game_theory/shapley_value.py          | 14 ++++++++++----
 5 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/game_theory/best_response_dynamics.py b/game_theory/best_response_dynamics.py
index 62b9eb0d7138..b2874e9ca771 100644
--- a/game_theory/best_response_dynamics.py
+++ b/game_theory/best_response_dynamics.py
@@ -19,6 +19,7 @@ def best_response_dynamics(payoff_matrix_a, payoff_matrix_b, iterations=10):
 
     return strategy_a, strategy_b
 
+
 # Example usage
 payoff_a = np.array([[3, 0], [5, 1]])
 payoff_b = np.array([[2, 4], [0, 2]])

diff --git a/game_theory/fictitious_play.py b/game_theory/fictitious_play.py
index d59fd73cb340..1c1ef0976103 100644
--- a/game_theory/fictitious_play.py
+++ b/game_theory/fictitious_play.py
@@ -25,6 +25,7 @@ def fictitious_play(payoff_matrix_a, payoff_matrix_b, iterations=100):
 
     return strategy_a, strategy_b
 
+
 # Example usage
 payoff_a = np.array([[3, 0], [5, 1]])
 payoff_b = np.array([[2, 4], [0, 2]])

diff --git a/game_theory/minimax_algorithm.py b/game_theory/minimax_algorithm.py
index 4e7db0e701c3..607ccce07119 100644
--- a/game_theory/minimax_algorithm.py
+++ b/game_theory/minimax_algorithm.py
@@ -3,7 +3,7 @@ def minimax(depth, node_index, is_maximizing_player, values, alpha, beta):
         return values[node_index]
 
     if is_maximizing_player:
-        best_value = float('-inf')
+        best_value = float("-inf")
         for i in range(2):  # Two children (0 and 1)
             value = minimax(depth - 1, node_index * 2 + i, False, values, alpha, beta)
             best_value = max(best_value, value)
@@ -12,7 +12,7 @@ def minimax(depth, node_index, is_maximizing_player, values, alpha, beta):
             break  # Beta cut-off
         return best_value
     else:
-        best_value = float('inf')
+        best_value = float("inf")
         for i in range(2):  # Two children (0 and 1)
             value = minimax(depth - 1, node_index * 2 + i, True, values, alpha, beta)
             best_value = min(best_value, value)
@@ -21,8 +21,9 @@ def minimax(depth, node_index, is_maximizing_player, values, alpha, beta):
             break  # Alpha cut-off
         return best_value
 
+
 # Example usage
 values = [3, 5, 2, 9, 0, 1, 8, 6]  # Leaf node values
 depth = 3  # Depth of the game tree
-result = minimax(depth, 0, True, values, float('-inf'), float('inf'))
+result = minimax(depth, 0, True, values, float("-inf"), float("inf"))
 print("The optimal value is:", result)

diff --git a/game_theory/nash_equlibrium.py b/game_theory/nash_equlibrium.py
index 8218aac97963..4daf6469506f 100644
--- a/game_theory/nash_equlibrium.py
+++ b/game_theory/nash_equlibrium.py
@@ -1,6 +1,7 @@
 import numpy as np
 from scipy.optimize import linprog
 
+
 def find_nash_equilibrium(payoff_matrix_a, payoff_matrix_b):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]
@@ -23,6 +24,7 @@ def find_nash_equilibrium(payoff_matrix_a, payoff_matrix_b):
 
     return p_a, p_b
 
+
 # Example usage
 payoff_a = np.array([[3, 0], [5, 1]])
 payoff_b = np.array([[2, 4], [0, 2]])

diff --git a/game_theory/shapley_value.py b/game_theory/shapley_value.py
index 331cb3b1da2a..3452d40496fa 100644
--- a/game_theory/shapley_value.py
+++ b/game_theory/shapley_value.py
@@ -1,5 +1,6 @@
 import numpy as np
 
+
 def shapley_value(payoff_matrix):
     n = payoff_matrix.shape[0]  # Number of players
     shapley_values = np.zeros(n)  # Initialize Shapley values
@@ -13,14 +14,19 @@ def shapley_value(payoff_matrix):
 
             # Calculate the value of the subset S without player i
             s_without_i = s & ~(1 << i)  # Remove player i from the subset
-            marginal_contribution = payoff_matrix[s][i] - (payoff_matrix[s_without_i][i] if s_without_i else 0)
-
+            marginal_contribution = payoff_matrix[s][i] - (
+                payoff_matrix[s_without_i][i] if s_without_i else 0
+            )
+
             # Count the size of the subset S
-            size_of_s = bin(s).count('1')  # Number of players in subset S
-            shapley_values[i] += marginal_contribution / (size_of_s * (n - size_of_s))  # Normalize by size of S
+            size_of_s = bin(s).count("1")  # Number of players in subset S
+            shapley_values[i] += marginal_contribution / (
+                size_of_s * (n - size_of_s)
+            )  # Normalize by size of S
 
     return shapley_values
 
+
 # Example usage
 payoff_matrix = np.array([[1, 2], [3, 4]])
 shapley_vals = shapley_value(payoff_matrix)

From 19923cac5b643015b43cc650f91c5e0f38f73737 Mon Sep 17 00:00:00 2001
From: isatyamks
Date: Mon, 7 Oct 2024 22:11:22 +0530
Subject: [PATCH 07/16] Add numpy import to best_response_dynamics.py

---
 game_theory/best_response_dynamics.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/game_theory/best_response_dynamics.py b/game_theory/best_response_dynamics.py
index 62b9eb0d7138..464cef2d8f9f 100644
--- a/game_theory/best_response_dynamics.py
+++ b/game_theory/best_response_dynamics.py
@@ -1,3 +1,4 @@
+import numpy as np
 def best_response_dynamics(payoff_matrix_a, payoff_matrix_b, iterations=10):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]

From 7f58338b9cfac4890998fff4d020e88a4828224d Mon Sep 17 00:00:00 2001
From: Satyam Kumar
Date: Mon, 7 Oct 2024 22:12:52 +0530
Subject: [PATCH 08/16] Update best_response_dynamics.py

---
 game_theory/best_response_dynamics.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/game_theory/best_response_dynamics.py b/game_theory/best_response_dynamics.py
index b2874e9ca771..0877c5b7c13c 100644
--- a/game_theory/best_response_dynamics.py
+++ b/game_theory/best_response_dynamics.py
@@ -1,3 +1,4 @@
+import numpy as np
 def best_response_dynamics(payoff_matrix_a, payoff_matrix_b, iterations=10):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]

From 92646bc94a6e3b0c5bed700a7b11c90cb9d44aa5 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 16:43:12 +0000
Subject: [PATCH 09/16] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 game_theory/best_response_dynamics.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/game_theory/best_response_dynamics.py b/game_theory/best_response_dynamics.py
index 0877c5b7c13c..a3fb04468c70 100644
--- a/game_theory/best_response_dynamics.py
+++ b/game_theory/best_response_dynamics.py
@@ -1,4 +1,6 @@
 import numpy as np
+
+
 def best_response_dynamics(payoff_matrix_a, payoff_matrix_b, iterations=10):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]

From 9b0645d88844c5e8fe804f9d38693b49e2983e16 Mon Sep 17 00:00:00 2001
From: Satyam Kumar
Date: Mon, 7 Oct 2024 22:16:52 +0530
Subject: [PATCH 10/16] Update fictitious_play.py

---
 game_theory/fictitious_play.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/game_theory/fictitious_play.py b/game_theory/fictitious_play.py
index 1c1ef0976103..4740ebe130b7 100644
--- a/game_theory/fictitious_play.py
+++ b/game_theory/fictitious_play.py
@@ -1,3 +1,4 @@
+import numpy as np
 def fictitious_play(payoff_matrix_a, payoff_matrix_b, iterations=100):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]

From af4427f312f995980af4098d06e0ba32ddad7938 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 16:47:14 +0000
Subject: [PATCH 11/16] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 game_theory/fictitious_play.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/game_theory/fictitious_play.py b/game_theory/fictitious_play.py
index 4740ebe130b7..906bc2debcc9 100644
--- a/game_theory/fictitious_play.py
+++ b/game_theory/fictitious_play.py
@@ -1,4 +1,6 @@
 import numpy as np
+
+
 def fictitious_play(payoff_matrix_a, payoff_matrix_b, iterations=100):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]

From d8e0f1ae1384c011c26da88c2fae1bed1f01fd9e Mon Sep 17 00:00:00 2001
From: isatyamks
Date: Mon, 7 Oct 2024 22:46:01 +0530
Subject: [PATCH 12/16] __init__.py added

---
 game_theory/__init__.py        | 0
 game_theory/fictitious_play.py | 2 ++
 2 files changed, 2 insertions(+)
 create mode 100644 game_theory/__init__.py

diff --git a/game_theory/__init__.py b/game_theory/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1

diff --git a/game_theory/fictitious_play.py b/game_theory/fictitious_play.py
index d59fd73cb340..9ff414e7fb61 100644
--- a/game_theory/fictitious_play.py
+++ b/game_theory/fictitious_play.py
@@ -1,3 +1,5 @@
+import numpy as np
+
 def fictitious_play(payoff_matrix_a, payoff_matrix_b, iterations=100):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]

From eec843721c0a51ccf7c958013acf023aeb7f85d9 Mon Sep 17 00:00:00 2001
From: isatyamks
Date: Mon, 7 Oct 2024 22:48:36 +0530
Subject: [PATCH 13/16] Refactor best_response_dynamics.py and fictitious_play.py

Remove unnecessary code and resolve merge conflicts
---
 game_theory/best_response_dynamics.py | 3 ---
 game_theory/fictitious_play.py        | 4 ----
 2 files changed, 7 deletions(-)

diff --git a/game_theory/best_response_dynamics.py b/game_theory/best_response_dynamics.py
index 86f14b6f049a..a3fb04468c70 100644
--- a/game_theory/best_response_dynamics.py
+++ b/game_theory/best_response_dynamics.py
@@ -1,9 +1,6 @@
 import numpy as np
 
-<<<<<<< HEAD
-=======
->>>>>>> af4427f312f995980af4098d06e0ba32ddad7938
 
 def best_response_dynamics(payoff_matrix_a, payoff_matrix_b, iterations=10):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]

diff --git a/game_theory/fictitious_play.py b/game_theory/fictitious_play.py
index e9db897e1254..8317abfc04ef 100644
--- a/game_theory/fictitious_play.py
+++ b/game_theory/fictitious_play.py
@@ -1,9 +1,5 @@
 import numpy as np
 
-<<<<<<< HEAD
-=======
-
->>>>>>> af4427f312f995980af4098d06e0ba32ddad7938
 def fictitious_play(payoff_matrix_a, payoff_matrix_b, iterations=100):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]

From 5ca3fd312eed3221d36ccbda8d44bc455ac04818 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 17:19:05 +0000
Subject: [PATCH 14/16] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 game_theory/fictitious_play.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/game_theory/fictitious_play.py b/game_theory/fictitious_play.py
index 8317abfc04ef..906bc2debcc9 100644
--- a/game_theory/fictitious_play.py
+++ b/game_theory/fictitious_play.py
@@ -1,5 +1,6 @@
 import numpy as np
 
+
 def fictitious_play(payoff_matrix_a, payoff_matrix_b, iterations=100):
     n = payoff_matrix_a.shape[0]
     m = payoff_matrix_a.shape[1]

From 3d8f0173599cbd8845712023d81d496ca5136ea8 Mon Sep 17 00:00:00 2001
From: Satyam Kumar
Date: Mon, 7 Oct 2024 23:11:10 +0530
Subject: [PATCH 15/16] Update shapley_value.py

---
 game_theory/shapley_value.py | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/game_theory/shapley_value.py b/game_theory/shapley_value.py
index 3452d40496fa..7561e17e099d 100644
--- a/game_theory/shapley_value.py
+++ b/game_theory/shapley_value.py
@@ -1,8 +1,7 @@
 import numpy as np
 
-
 def shapley_value(payoff_matrix):
-    n = payoff_matrix.shape[0]  # Number of players
+    n = payoff_matrix.shape[1]  # Number of players
     shapley_values = np.zeros(n)  # Initialize Shapley values
 
     # Iterate over each player
@@ -14,20 +13,16 @@ def shapley_value(payoff_matrix):
 
             # Calculate the value of the subset S without player i
             s_without_i = s & ~(1 << i)  # Remove player i from the subset
-            marginal_contribution = payoff_matrix[s][i] - (
-                payoff_matrix[s_without_i][i] if s_without_i else 0
-            )
+            marginal_contribution = payoff_matrix[s][i] - (payoff_matrix[s_without_i][i] if s_without_i else 0)
 
             # Count the size of the subset S
             size_of_s = bin(s).count("1")  # Number of players in subset S
-            shapley_values[i] += marginal_contribution / (
-                size_of_s * (n - size_of_s)
-            )  # Normalize by size of S
+            shapley_values[i] += marginal_contribution / (size_of_s * (n - size_of_s))  # Normalize by size of S
 
     return shapley_values
 
-
 # Example usage
-payoff_matrix = np.array([[1, 2], [3, 4]])
+# Payoff matrix with payoffs for 4 coalitions: {}, {1}, {2}, {1, 2}
+payoff_matrix = np.array([[0, 0], [1, 0], [0, 2], [3, 4]])
 shapley_vals = shapley_value(payoff_matrix)
 print("Shapley Values:", shapley_vals)

From ed74af4e2b2b028d5066db6127de9c8c121fc369 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 7 Oct 2024 17:41:32 +0000
Subject: [PATCH 16/16] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 game_theory/shapley_value.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/game_theory/shapley_value.py b/game_theory/shapley_value.py
index 7561e17e099d..c7db1f82d4fb 100644
--- a/game_theory/shapley_value.py
+++ b/game_theory/shapley_value.py
@@ -1,5 +1,6 @@
 import numpy as np
 
+
 def shapley_value(payoff_matrix):
     n = payoff_matrix.shape[1]  # Number of players
     shapley_values = np.zeros(n)  # Initialize Shapley values
@@ -13,14 +14,19 @@ def shapley_value(payoff_matrix):
 
             # Calculate the value of the subset S without player i
             s_without_i = s & ~(1 << i)  # Remove player i from the subset
-            marginal_contribution = payoff_matrix[s][i] - (payoff_matrix[s_without_i][i] if s_without_i else 0)
+            marginal_contribution = payoff_matrix[s][i] - (
+                payoff_matrix[s_without_i][i] if s_without_i else 0
+            )
 
             # Count the size of the subset S
             size_of_s = bin(s).count("1")  # Number of players in subset S
-            shapley_values[i] += marginal_contribution / (size_of_s * (n - size_of_s))  # Normalize by size of S
+            shapley_values[i] += marginal_contribution / (
+                size_of_s * (n - size_of_s)
+            )  # Normalize by size of S
 
     return shapley_values
 
+
 # Example usage
 # Payoff matrix with payoffs for 4 coalitions: {}, {1}, {2}, {1, 2}
 payoff_matrix = np.array([[0, 0], [1, 0], [0, 2], [3, 4]])
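
Note on game_theory/shapley_value.py (PATCH 15-16): the normalization term size_of_s * (n - size_of_s) is zero whenever S is the grand coalition (size_of_s == n), so the loop divides by zero there, and it is not the standard Shapley weighting, which for a coalition S excluding player i is |S|! * (n - |S| - 1)! / n!. Below is a minimal sketch using the textbook weights while keeping the series' coalition-indexed payoff layout (row s holds each player's payoff in coalition s); the helper name shapley_value_factorial is illustrative, not part of the series.

import math

import numpy as np


def shapley_value_factorial(payoff_matrix):
    n = payoff_matrix.shape[1]  # Number of players
    shapley_values = np.zeros(n)

    for i in range(n):
        for s in range(1 << n):  # Coalitions encoded as bitmasks
            if s & (1 << i):
                continue  # Only iterate over coalitions S that exclude player i
            size_of_s = bin(s).count("1")
            # Standard Shapley weight: |S|! * (n - |S| - 1)! / n!
            weight = math.factorial(size_of_s) * math.factorial(n - size_of_s - 1) / math.factorial(n)
            # Marginal contribution of player i when joining coalition S
            marginal = payoff_matrix[s | (1 << i)][i] - payoff_matrix[s][i]
            shapley_values[i] += weight * marginal

    return shapley_values


# Example usage (same layout as PATCH 15: rows are coalitions {}, {1}, {2}, {1, 2})
payoff_matrix = np.array([[0, 0], [1, 0], [0, 2], [3, 4]])
print("Shapley Values:", shapley_value_factorial(payoff_matrix))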
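
Note on game_theory/fictitious_play.py: as committed, counts_a and counts_b are accumulated but never read, so each player best-responds to the opponent's most recent pure action — that is best response dynamics again, not fictitious play, which best-responds to the opponent's empirical mixture of all past actions. A minimal sketch of the empirical-average version follows, assuming an arbitrary initial pure action for each player; the name fictitious_play_empirical is illustrative.

import numpy as np


def fictitious_play_empirical(payoff_matrix_a, payoff_matrix_b, iterations=100):
    n, m = payoff_matrix_a.shape
    counts_a = np.zeros(n)
    counts_b = np.zeros(m)

    # Seed with one arbitrary pure action per player
    counts_a[0] += 1
    counts_b[0] += 1

    for _ in range(iterations):
        # Best respond to the opponent's empirical mixture of past actions
        empirical_a = counts_a / counts_a.sum()
        empirical_b = counts_b / counts_b.sum()
        counts_a[np.argmax(payoff_matrix_a @ empirical_b)] += 1
        counts_b[np.argmax(payoff_matrix_b.T @ empirical_a)] += 1

    # The empirical frequencies are the approximate equilibrium strategies
    return counts_a / counts_a.sum(), counts_b / counts_b.sum()


# Example usage (same payoffs as the series)
payoff_a = np.array([[3, 0], [5, 1]])
payoff_b = np.array([[2, 4], [0, 2]])
print("Fictitious Play strategies:", fictitious_play_empirical(payoff_a, payoff_b))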
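
Note on game_theory/nash_equlibrium.py: the linear program there resembles the normalization trick that solves two-player zero-sum games, but it is applied to a general bimatrix game, where finding a Nash equilibrium generally requires support enumeration or the Lemke-Howson algorithm; the constraint matrix is also passed untransposed, so the shapes only line up for square games. A minimal sketch of the zero-sum case that this LP form actually fits, assuming payoffs are shifted strictly positive first; the helper name zero_sum_maximin and the matching-pennies example are illustrative.

import numpy as np
from scipy.optimize import linprog


def zero_sum_maximin(payoff_matrix):
    # Shift payoffs to be strictly positive; the optimal strategy is unchanged
    # and the game value just shifts by the same constant.
    shift = 1 - payoff_matrix.min()
    a = payoff_matrix + shift
    n, m = a.shape

    # Row player maximin: max v  s.t.  a.T @ p >= v, sum(p) = 1, p >= 0.
    # Substituting x = p / v gives:  min sum(x)  s.t.  a.T @ x >= 1, x >= 0.
    result = linprog(c=np.ones(n), A_ub=-a.T, b_ub=-np.ones(m), bounds=(0, None))
    x = result.x
    value = 1.0 / x.sum()
    return x * value, value - shift


# Example usage: matching pennies has value 0 and optimal strategy (1/2, 1/2)
payoff = np.array([[1, -1], [-1, 1]])
print("Maximin strategy and value:", zero_sum_maximin(payoff))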