Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.11", "3.12", "3.13"]
python-version: ["3.10", "3.11", "3.12", "3.13"]
fail-fast: false
defaults:
run:
Expand All @@ -49,12 +49,12 @@ jobs:
- name: Check typing
run: uv run pyright


- name: Upload coverage reports to Codecov with GitHub Action on Python 3.11
uses: codecov/codecov-action@v4
if: ${{ matrix.python-version == '3.11' }}
- name: Upload coverage reports to Coveralls
uses: coverallsapp/github-action@v2
with:
path-to-lcov: coverage.xml
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }}

check-docs:
runs-on: ubuntu-latest
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

[![Release](https://img.shields.io/github/v/release/automl/HyperSHAP)](https://img.shields.io/github/v/release/automl/hypershap)
[![Build status](https://img.shields.io/github/actions/workflow/status/automl/hypershap/main.yml?branch=main)](https://github.com/automl/hypershap/actions/workflows/main.yml?query=branch%3Amain)
[![codecov](https://codecov.io/gh/automl/hypershap/branch/main/graph/badge.svg)](https://codecov.io/gh/automl/hypershap)
[![Coverage Status](https://coveralls.io/repos/github/automl/HyperSHAP/badge.svg?branch=dev)](https://coveralls.io/github/automl/HyperSHAP?branch=dev)
[![Commit activity](https://img.shields.io/github/commit-activity/m/automl/hypershap)](https://img.shields.io/github/commit-activity/m/automl/hypershap)
[![License](https://img.shields.io/github/license/automl/hypershap)](https://img.shields.io/github/license/automl/hypershap)

Expand Down
7 changes: 4 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,22 +5,23 @@ description = "HyperSHAP is a post-hoc explanation method for hyperparameter opt
authors = [{ name = "Marcel Wever", email = "m.wever@ai.uni-hannover.de" }]
readme = "README.md"
keywords = ['python']
requires-python = ">=3.11,<4.0"
requires-python = ">=3.10,<4.0"
classifiers = [
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Topic :: Software Development :: Libraries :: Python Modules",
]
dependencies = [
"shapiq>=1.2.3",
"numpy>=2.3.2",
"numpy>=2.2.6",
"scikit-learn>=1.7.1",
"matplotlib>=3.10.5",
"networkx>=3.5"
"networkx>=3.4.2",
]

[project.urls]
Expand Down
7 changes: 4 additions & 3 deletions src/hypershap/games/ablation.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,8 +81,8 @@ def evaluate_single_coalition(self, coalition: np.ndarray) -> float:
res = self._get_explanation_task().surrogate_model.evaluate(blend)

# validate that we do not get a list of floats by accident
if isinstance(res, list):
raise TypeError
if isinstance(res, list): # pragma: no cover
raise TypeError # pragma: no cover

return res

Expand All @@ -98,4 +98,5 @@ def _get_explanation_task(self) -> AblationExplanationTask:
"""
if isinstance(self.explanation_task, AblationExplanationTask):
return self.explanation_task
raise ValueError

raise ValueError # pragma: no cover
2 changes: 1 addition & 1 deletion src/hypershap/games/optimizerbias.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ def __init__(
def _get_explanation_task(self) -> OptimizerBiasExplanationTask:
if isinstance(self.explanation_task, OptimizerBiasExplanationTask):
return self.explanation_task
raise ValueError
raise ValueError # pragma: no cover

def evaluate_single_coalition(self, coalition: np.ndarray) -> float:
"""Evaluate a single coalition by comparing against an optimizer ensemble.
Expand Down
49 changes: 35 additions & 14 deletions src/hypershap/hypershap.py
Original file line number Diff line number Diff line change
Expand Up @@ -301,12 +301,18 @@ def optimizer_bias(
og = OptimizerBiasGame(explanation_task=optimizer_bias_task, n_workers=self.n_workers, verbose=self.verbose)
return self.__get_interaction_values(game=og, index=index, order=order)

def plot_si_graph(self, interaction_values: InteractionValues | None = None, save_path: str | None = None) -> None:
def plot_si_graph(
self,
interaction_values: InteractionValues | None = None,
save_path: str | None = None,
no_show: bool | None = None,
) -> None:
"""Plot the SHAP interaction values as a graph.

Args:
interaction_values (InteractionValues | None, optional): The interaction values to plot. Defaults to None.
save_path (str | None, optional): The path to save the plot. Defaults to None.
no_show (bool | None, optional): Do not show the plot if set to true. Defaults to None.

"""
if interaction_values is None and self.last_interaction_values is None:
Expand All @@ -315,8 +321,8 @@ def plot_si_graph(self, interaction_values: InteractionValues | None = None, sav
# if given interaction values use those, else use cached interaction values
iv = interaction_values if interaction_values is not None else self.last_interaction_values

if not isinstance(iv, InteractionValues):
raise TypeError
if not isinstance(iv, InteractionValues): # pragma: no cover
raise TypeError # pragma: no cover

hyperparameter_names = self.explanation_task.get_hyperparameter_names()

Expand All @@ -342,14 +348,21 @@ def get_circular_layout(n_players: int) -> dict:
plt.savefig(save_path)
logger.info("Saved SI graph to %s", save_path)

plt.show()
if no_show is None or not no_show: # pragma: no cover
plt.show() # pragma: no cover

def plot_upset(self, interaction_values: InteractionValues | None = None, save_path: str | None = None) -> None:
def plot_upset(
self,
interaction_values: InteractionValues | None = None,
save_path: str | None = None,
no_show: bool | None = None,
) -> None:
"""Plot the SHAP interaction values as an upset plot graph.

Args:
interaction_values (InteractionValues | None, optional): The interaction values to plot. Defaults to None.
save_path (str | None, optional): The path to save the plot. Defaults to None.
no_show (bool | None, optional): Do not show the plot if set to true. Defaults to None.

"""
if interaction_values is None and self.last_interaction_values is None:
Expand All @@ -358,15 +371,15 @@ def plot_upset(self, interaction_values: InteractionValues | None = None, save_p
# if given interaction values use those, else use cached interaction values
iv = interaction_values if interaction_values is not None else self.last_interaction_values

if not isinstance(iv, InteractionValues):
raise TypeError
if not isinstance(iv, InteractionValues): # pragma: no cover
raise TypeError # pragma: no cover

hyperparameter_names = self.explanation_task.get_hyperparameter_names()

fig = iv.plot_upset(feature_names=hyperparameter_names, show=False)

if fig is None:
raise TypeError
if fig is None: # pragma: no cover
raise TypeError # pragma: no cover

ax = fig.get_axes()[0]
ax.set_ylabel("Performance Gain")
Expand All @@ -379,14 +392,21 @@ def plot_upset(self, interaction_values: InteractionValues | None = None, save_p
if save_path is not None:
plt.savefig(save_path)

plt.show()
if no_show is None or not no_show: # pragma: no cover
plt.show() # pragma: no cover

def plot_force(self, interaction_values: InteractionValues | None = None, save_path: str | None = None) -> None:
def plot_force(
self,
interaction_values: InteractionValues | None = None,
save_path: str | None = None,
no_show: bool | None = None,
) -> None:
"""Plot the SHAP interaction values as a forceplot graph.

Args:
interaction_values: Interaction values to plot. Defaults to None.
save_path: The path to save the plot. Defaults to None.
no_show (bool | None, optional): Do not show the plot if set to true. Defaults to None.

"""
if interaction_values is None and self.last_interaction_values is None:
Expand All @@ -395,8 +415,8 @@ def plot_force(self, interaction_values: InteractionValues | None = None, save_p
# if given interaction values use those, else use cached interaction values
iv = interaction_values if interaction_values is not None else self.last_interaction_values

if not isinstance(iv, InteractionValues):
raise TypeError
if not isinstance(iv, InteractionValues): # pragma: no cover
raise TypeError # pragma: no cover

hyperparameter_names = self.explanation_task.get_hyperparameter_names()

Expand All @@ -406,4 +426,5 @@ def plot_force(self, interaction_values: InteractionValues | None = None, save_p
if save_path is not None:
plt.savefig(save_path)

plt.show()
if no_show is None or not no_show: # pragma: no cover
plt.show() # pragma: no cover
16 changes: 8 additions & 8 deletions src/hypershap/surrogate_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,8 +106,8 @@ def evaluate_config(self, config: Configuration) -> float:

"""
res = self.evaluate(np.array(config.get_array()))
if not isinstance(res, float):
raise TypeError
if not isinstance(res, float): # pragma: no cover
raise TypeError # pragma: no cover
return res

def evaluate_config_batch(self, config_batch: list[Configuration]) -> list[float]:
Expand All @@ -121,8 +121,8 @@ def evaluate_config_batch(self, config_batch: list[Configuration]) -> list[float

"""
res = self.evaluate(np.array([config.get_array() for config in config_batch]))
if not isinstance(res, list):
raise TypeError
if not isinstance(res, list): # pragma: no cover
raise TypeError # pragma: no cover
return res

@abstractmethod
Expand Down Expand Up @@ -163,8 +163,8 @@ def evaluate_config(self, config: Configuration) -> float:

"""
res = self.evaluate(config.get_array())
if not isinstance(res, float):
raise TypeError
if not isinstance(res, float): # pragma: no cover
raise TypeError # pragma: no cover
return res

def evaluate_config_batch(self, config_batch: list[Configuration]) -> list[float]:
Expand All @@ -178,8 +178,8 @@ def evaluate_config_batch(self, config_batch: list[Configuration]) -> list[float

"""
res = self.evaluate(np.array([config.get_array() for config in config_batch]))
if not isinstance(res, list):
raise TypeError
if not isinstance(res, list): # pragma: no cover
raise TypeError # pragma: no cover
return res

def evaluate(self, config_array: np.ndarray) -> float | list[float]:
Expand Down
1 change: 1 addition & 0 deletions tests/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Tests for the HyperSHAP module."""
7 changes: 7 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
"""Conftest with all pytest plugins for HyperSHAP."""

from __future__ import annotations

pytest_plugins = [
"tests.fixtures.simple_setup",
]
1 change: 1 addition & 0 deletions tests/fixtures/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Fixtures for the HyperSHAP tests."""
72 changes: 72 additions & 0 deletions tests/fixtures/simple_setup.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
"""The module contains simple setup fixtures for more convenient testing."""

from __future__ import annotations

import pytest
from ConfigSpace import Configuration, ConfigurationSpace, UniformFloatHyperparameter

from hypershap import ExplanationTask


@pytest.fixture(scope="session")
def simple_config_space() -> ConfigurationSpace:
    """Build a two-parameter configuration space shared across the session.

    Returns:
        A configuration space containing the uniform float hyperparameters
        ``a`` and ``b``, each over the interval [0, 1] with default value 0.

    """
    space = ConfigurationSpace()
    # Add "a" first, then "b", matching the order tests rely on.
    for hp_name in ("a", "b"):
        space.add(UniformFloatHyperparameter(hp_name, 0, 1, 0))
    return space


class SimpleBlackboxFunction:
    """A linear two-variable black box function used as a test target.

    Computes the weighted sum ``a_coeff * a + b_coeff * b`` of the two
    hyperparameters ``a`` and ``b`` of a configuration.
    """

    def __init__(self, a_coeff: float, b_coeff: float) -> None:
        """Store the linear coefficients of the function.

        Args:
            a_coeff: Coefficient for hyperparameter a.
            b_coeff: Coefficient for hyperparameter b.

        """
        self.a_coeff = a_coeff
        self.b_coeff = b_coeff

    def evaluate(self, x: Configuration) -> float:
        """Evaluate a configuration by reading its ``a`` and ``b`` entries.

        Args:
            x: The configuration to be evaluated.

        Returns: The value of the configuration.

        """
        a_val = x["a"]
        b_val = x["b"]
        return self.value(a_val, b_val)

    def value(self, a: float, b: float) -> float:
        """Compute the weighted sum of the two hyperparameter values.

        Args:
            a: The value for hyperparameter a.
            b: The value for hyperparameter b.

        """
        return a * self.a_coeff + b * self.b_coeff


@pytest.fixture(scope="session")
def simple_blackbox_function() -> SimpleBlackboxFunction:
    """Provide a session-wide linear blackbox with fixed coefficients.

    Returns: The simple blackbox function.

    """
    # Coefficients 0.7 and 2.0 give hyperparameter b a visibly larger
    # influence than a, which tests can assert against.
    blackbox = SimpleBlackboxFunction(0.7, 2.0)
    return blackbox


@pytest.fixture(scope="session")
def simple_base_et(
    simple_config_space: ConfigurationSpace,
    simple_blackbox_function: SimpleBlackboxFunction,
) -> ExplanationTask:
    """Assemble the base explanation task from the simple fixtures."""
    evaluate_fn = simple_blackbox_function.evaluate
    return ExplanationTask.from_function(simple_config_space, evaluate_fn)
3 changes: 3 additions & 0 deletions tests/test_ablation_game.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,3 +70,6 @@ def evaluate(self, config: np.ndarray) -> float:
surrogate_model.last_queried_config,
"Mismatch between expected config and actual config",
)

game_hp_names = ablation_game.get_hyperparameter_names()
assert game_hp_names == ["a", "b", "c"], "Hyperparameter names mismatch"
Loading