partial_dependence.py · 1473 lines (1283 loc) · 58.6 KB
import numbers
from itertools import chain
from math import ceil
import numpy as np
from scipy import sparse
from scipy.stats.mstats import mquantiles
from ...base import is_regressor
from ...utils import (
Bunch,
_safe_indexing,
check_array,
check_random_state,
)
from ...utils._encode import _unique
from ...utils._optional_dependencies import check_matplotlib_support # noqa
from ...utils.parallel import Parallel, delayed
from .. import partial_dependence
from .._pd_utils import _check_feature_names, _get_feature_index
class PartialDependenceDisplay:
"""Partial Dependence Plot (PDP).
This can also display individual partial dependencies, which are often
referred to as Individual Conditional Expectation (ICE).
It is recommended to use
:func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` to create a
:class:`~sklearn.inspection.PartialDependenceDisplay`. All parameters are
stored as attributes.
Read more in
:ref:`sphx_glr_auto_examples_miscellaneous_plot_partial_dependence_visualization_api.py`
and the :ref:`User Guide <partial_dependence>`.
.. versionadded:: 0.22
Parameters
----------
pd_results : list of Bunch
Results of :func:`~sklearn.inspection.partial_dependence` for
``features``.
features : list of (int,) or list of (int, int)
Indices of features for a given plot. A tuple of one integer will plot
a partial dependence curve of one feature. A tuple of two integers will
plot a two-way partial dependence curve as a contour plot.
feature_names : list of str
Feature names corresponding to the indices in ``features``.
target_idx : int
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
deciles : dict
Deciles for feature indices in ``features``.
kind : {'average', 'individual', 'both'} or list of such str, \
default='average'
Whether to plot the partial dependence averaged across all the samples
in the dataset or one line per sample or both.
- ``kind='average'`` results in the traditional PD plot;
- ``kind='individual'`` results in the ICE plot;
- ``kind='both'`` results in plotting both the ICE and PD on the same
plot.
A list of such strings can be provided to specify `kind` on a per-plot
basis. The length of the list should be the same as the number of
interactions requested in `features`.
.. note::
ICE ('individual' or 'both') is not a valid option for two-way
interaction plots. As a result, an error will be raised.
Two-way interaction plots should always be configured to
use the 'average' kind instead.
.. note::
The fast ``method='recursion'`` option is only available for
`kind='average'` and `sample_weights=None`. Computing individual
dependencies and doing weighted averages requires using the slower
`method='brute'`.
.. versionadded:: 0.24
Add `kind` parameter with `'average'`, `'individual'`, and `'both'`
options.
.. versionadded:: 1.1
Add the possibility to pass a list of string specifying `kind`
for each plot.
subsample : float, int or None, default=1000
Sampling for ICE curves when `kind` is 'individual' or 'both'.
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to be used to plot ICE curves. If int, represents the
maximum absolute number of samples to use.
Note that the full dataset is still used to calculate partial
dependence when `kind='both'`.
.. versionadded:: 0.24
random_state : int, RandomState instance or None, default=None
Controls the randomness of the selected samples when `subsample` is not
`None`. See :term:`Glossary <random_state>` for details.
.. versionadded:: 0.24
is_categorical : list of (bool,) or list of (bool, bool), default=None
Whether each target feature in `features` is categorical or not.
The list should be the same size as `features`. If `None`, all features
are assumed to be continuous.
.. versionadded:: 1.2
Attributes
----------
bounding_ax_ : matplotlib Axes or None
If `ax` is an axes or None, the `bounding_ax_` is the axes where the
grid of partial dependence plots are drawn. If `ax` is a list of axes
or a numpy array of axes, `bounding_ax_` is None.
axes_ : ndarray of matplotlib Axes
If `ax` is an axes or None, `axes_[i, j]` is the axes on the i-th row
and j-th column. If `ax` is a list of axes, `axes_[i]` is the i-th item
in `ax`. Elements that are None correspond to a nonexisting axes in
that position.
lines_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `lines_[i, j]` is the partial dependence
curve on the i-th row and j-th column. If `ax` is a list of axes,
`lines_[i]` is the partial dependence curve corresponding to the i-th
item in `ax`. Elements that are None correspond to a nonexisting axes
or an axes that does not include a line plot.
deciles_vlines_ : ndarray of matplotlib LineCollection
If `ax` is an axes or None, `vlines_[i, j]` is the line collection
representing the x axis deciles of the i-th row and j-th column. If
`ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
`ax`. Elements that are None correspond to a nonexisting axes or an
axes that does not include a PDP plot.
.. versionadded:: 0.23
deciles_hlines_ : ndarray of matplotlib LineCollection
If `ax` is an axes or None, `vlines_[i, j]` is the line collection
representing the y axis deciles of the i-th row and j-th column. If
`ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
`ax`. Elements that are None correspond to a nonexisting axes or an
axes that does not include a 2-way plot.
.. versionadded:: 0.23
contours_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `contours_[i, j]` is the partial dependence
plot on the i-th row and j-th column. If `ax` is a list of axes,
`contours_[i]` is the partial dependence plot corresponding to the i-th
item in `ax`. Elements that are None correspond to a nonexisting axes
or an axes that does not include a contour plot.
bars_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `bars_[i, j]` is the partial dependence bar
plot on the i-th row and j-th column (for a categorical feature).
If `ax` is a list of axes, `bars_[i]` is the partial dependence bar
plot corresponding to the i-th item in `ax`. Elements that are None
correspond to a nonexisting axes or an axes that does not include a
bar plot.
.. versionadded:: 1.2
heatmaps_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `heatmaps_[i, j]` is the partial dependence
heatmap on the i-th row and j-th column (for a pair of categorical
features) . If `ax` is a list of axes, `heatmaps_[i]` is the partial
dependence heatmap corresponding to the i-th item in `ax`. Elements
that are None correspond to a nonexisting axes or an axes that does not
include a heatmap.
.. versionadded:: 1.2
figure_ : matplotlib Figure
Figure containing partial dependence plots.
See Also
--------
partial_dependence : Compute Partial Dependence values.
PartialDependenceDisplay.from_estimator : Plot Partial Dependence.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> from sklearn.inspection import PartialDependenceDisplay
>>> from sklearn.inspection import partial_dependence
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> features, feature_names = [(0,)], [f"Features #{i}" for i in range(X.shape[1])]
>>> deciles = {0: np.linspace(0, 1, num=5)}
>>> pd_results = partial_dependence(
... clf, X, features=0, kind="average", grid_resolution=5)
>>> display = PartialDependenceDisplay(
... [pd_results], features=features, feature_names=feature_names,
... target_idx=0, deciles=deciles
... )
>>> display.plot(pdp_lim={1: (-1.38, 0.66)})
<...>
>>> plt.show()
"""
def __init__(
self,
pd_results,
*,
features,
feature_names,
target_idx,
deciles,
kind="average",
subsample=1000,
random_state=None,
is_categorical=None,
):
self.pd_results = pd_results
self.features = features
self.feature_names = feature_names
self.target_idx = target_idx
self.deciles = deciles
self.kind = kind
self.subsample = subsample
self.random_state = random_state
self.is_categorical = is_categorical
@classmethod
def from_estimator(
cls,
estimator,
X,
features,
*,
sample_weight=None,
categorical_features=None,
feature_names=None,
target=None,
response_method="auto",
n_cols=3,
grid_resolution=100,
percentiles=(0.05, 0.95),
method="auto",
n_jobs=None,
verbose=0,
line_kw=None,
ice_lines_kw=None,
pd_line_kw=None,
contour_kw=None,
ax=None,
kind="average",
centered=False,
subsample=1000,
random_state=None,
):
"""Partial dependence (PD) and individual conditional expectation (ICE) plots.
Partial dependence plots, individual conditional expectation plots or an
overlay of both of them can be plotted by setting the ``kind``
parameter. The ``len(features)`` plots are arranged in a grid with
``n_cols`` columns. Two-way partial dependence plots are plotted as
contour plots. The deciles of the feature values will be shown with tick
marks on the x-axes for one-way plots, and on both axes for two-way
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
.. note::
:func:`PartialDependenceDisplay.from_estimator` does not support using the
same axes with multiple calls. To plot the partial dependence for
multiple estimators, please pass the axes created by the first call to the
second call::
>>> from sklearn.inspection import PartialDependenceDisplay
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.ensemble import RandomForestRegressor
>>> X, y = make_friedman1()
>>> est1 = LinearRegression().fit(X, y)
>>> est2 = RandomForestRegressor().fit(X, y)
>>> disp1 = PartialDependenceDisplay.from_estimator(est1, X,
... [1, 2])
>>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2],
... ax=disp1.axes_)
.. warning::
For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, the
`'recursion'` method (used by default) will not account for the `init`
predictor of the boosting process. In practice, this will produce
the same values as `'brute'` up to a constant offset in the target
response, provided that `init` is a constant estimator (which is the
default). However, if `init` is not a constant estimator, the
partial dependence values are incorrect for `'recursion'` because the
offset will be sample-dependent. It is preferable to use the `'brute'`
method. Note that this only applies to
:class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
.. versionadded:: 1.0
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like, dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is `'brute'`.
features : list of {int, str, pair of int, pair of str}
The target features for which to create the PDPs.
If `features[i]` is an integer or a string, a one-way PDP is created;
if `features[i]` is a tuple, a two-way PDP is created (only supported
with `kind='average'`). Each tuple must be of size 2.
If any entry is a string, then it must be in ``feature_names``.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. If
`sample_weight` is not `None`, then `method` will be set to `'brute'`.
Note that `sample_weight` is ignored for `kind='individual'`.
.. versionadded:: 1.3
categorical_features : array-like of shape (n_features,) or shape \
(n_categorical_features,), dtype={bool, int, str}, default=None
Indicates the categorical features.
- `None`: no feature will be considered categorical;
- boolean array-like: boolean mask of shape `(n_features,)`
indicating which features are categorical. Thus, this array has
the same length as `X.shape[1]`;
- integer or string array-like: integer indices or strings
indicating categorical features.
.. versionadded:: 1.2
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; `feature_names[i]` holds the name of the feature
with index `i`.
By default, the name of the feature corresponds to its numerical
index for a NumPy array and to its column name for a pandas dataframe.
target : int, default=None
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is `'recursion'`, the response is always the output of
:term:`decision_function`.
n_cols : int, default=3
The maximum number of columns in the grid plot. Only active when `ax`
is a single axis or `None`.
grid_resolution : int, default=100
The number of equally spaced points on the axes of the plots, for each
target feature.
percentiles : tuple of float, default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes. Must be in [0, 1].
method : str, default='auto'
The method used to calculate the averaged predictions:
- `'recursion'` is only supported for some tree-based estimators
(namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`)
but is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities. Since the `'recursion'` method implicitly computes
the average of the ICEs by design, it is not compatible with ICE and
thus `kind` must be `'average'`.
- `'brute'` is supported for any estimator, but is more
computationally intensive.
- `'auto'`: the `'recursion'` is used for estimators that support it,
and `'brute'` is used otherwise. If `sample_weight` is not `None`,
then `'brute'` is used regardless of the estimator.
Please see :ref:`this note <pdp_method_differences>` for
differences between the `'brute'` and `'recursion'` method.
n_jobs : int, default=None
The number of CPUs to use to compute the partial dependences.
Computation is parallelized over features specified by the `features`
parameter.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
Verbose output during PD computations.
line_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots. It can be used to define common
properties for both `ice_lines_kw` and `pdp_line_kw`.
ice_lines_kw : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
For ICE lines in the one-way partial dependence plots.
The key-value pairs defined in `ice_lines_kw` take priority over
`line_kw`.
pd_line_kw : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
For partial dependence in one-way partial dependence plots.
The key-value pairs defined in `pd_line_kw` take priority over
`line_kw`.
contour_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes are passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes is created and treated
as the single axes case.
kind : {'average', 'individual', 'both'}, default='average'
Whether to plot the partial dependence averaged across all the samples
in the dataset or one line per sample or both.
- ``kind='average'`` results in the traditional PD plot;
- ``kind='individual'`` results in the ICE plot;
- ``kind='both'`` results in plotting both the ICE and PD on the same
plot.
Note that the fast `method='recursion'` option is only available for
`kind='average'` and `sample_weights=None`. Computing individual
dependencies and doing weighted averages requires using the slower
`method='brute'`.
centered : bool, default=False
If `True`, the ICE and PD lines will start at the origin of the
y-axis. By default, no centering is done.
.. versionadded:: 1.1
subsample : float, int or None, default=1000
Sampling for ICE curves when `kind` is 'individual' or 'both'.
If `float`, should be between 0.0 and 1.0 and represent the proportion
of the dataset to be used to plot ICE curves. If `int`, represents the
absolute number of samples to use.
Note that the full dataset is still used to calculate averaged partial
dependence when `kind='both'`.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the selected samples when `subsample` is not
`None` and `kind` is either `'both'` or `'individual'`.
See :term:`Glossary <random_state>` for details.
Returns
-------
display : :class:`~sklearn.inspection.PartialDependenceDisplay`
See Also
--------
partial_dependence : Compute Partial Dependence values.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> from sklearn.inspection import PartialDependenceDisplay
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)])
<...>
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_estimator") # noqa
import matplotlib.pyplot as plt # noqa
# set target_idx for multi-class estimators
if hasattr(estimator, "classes_") and np.size(estimator.classes_) > 2:
if target is None:
raise ValueError("target must be specified for multi-class")
target_idx = np.searchsorted(estimator.classes_, target)
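# `np.searchsorted` locates `target` in the sorted `estimator.classes_`;
# the check below rejects values that are not actually one of the classes.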
if (
not (0 <= target_idx < len(estimator.classes_))
or estimator.classes_[target_idx] != target
):
raise ValueError("target not in est.classes_, got {}".format(target))
else:
# regression and binary classification
target_idx = 0
# Use check_array only on lists and other non-array-likes / sparse. Do not
# convert DataFrame into a NumPy array.
if not (hasattr(X, "__array__") or sparse.issparse(X)):
X = check_array(X, force_all_finite="allow-nan", dtype=object)
n_features = X.shape[1]
feature_names = _check_feature_names(X, feature_names)
# expand kind to always be a list of str
kind_ = [kind] * len(features) if isinstance(kind, str) else kind
if len(kind_) != len(features):
raise ValueError(
"When `kind` is provided as a list of strings, it should contain "
f"as many elements as `features`. `kind` contains {len(kind_)} "
f"element(s) and `features` contains {len(features)} element(s)."
)
# convert features into a seq of int tuples
tmp_features, ice_for_two_way_pd = [], []
for kind_plot, fxs in zip(kind_, features):
if isinstance(fxs, (numbers.Integral, str)):
fxs = (fxs,)
try:
fxs = tuple(
_get_feature_index(fx, feature_names=feature_names) for fx in fxs
)
except TypeError as e:
raise ValueError(
"Each entry in features must be either an int, "
"a string, or an iterable of size at most 2."
) from e
if not 1 <= np.size(fxs) <= 2:
raise ValueError(
"Each entry in features must be either an int, "
"a string, or an iterable of size at most 2."
)
# store the information if 2-way PD was requested with ICE to later
# raise a ValueError with an exhaustive list of problematic
# settings.
ice_for_two_way_pd.append(kind_plot != "average" and np.size(fxs) > 1)
tmp_features.append(fxs)
if any(ice_for_two_way_pd):
# raise an error and be specific regarding the parameter values
# when 1- and 2-way PD were requested
kind_ = [
"average" if forcing_average else kind_plot
for forcing_average, kind_plot in zip(ice_for_two_way_pd, kind_)
]
raise ValueError(
"ICE plot cannot be rendered for 2-way feature interactions. "
"2-way feature interactions mandates PD plots using the "
"'average' kind: "
f"features={features!r} should be configured to use "
f"kind={kind_!r} explicitly."
)
features = tmp_features
if categorical_features is None:
is_categorical = [
(False,) if len(fxs) == 1 else (False, False) for fxs in features
]
else:
# we need to create a boolean indicator of which features are
# categorical from the categorical_features list.
categorical_features = np.asarray(categorical_features)
if categorical_features.dtype.kind == "b":
# categorical features provided as a list of boolean
if categorical_features.size != n_features:
raise ValueError(
"When `categorical_features` is a boolean array-like, "
"the array should be of shape (n_features,). Got "
f"{categorical_features.size} elements while `X` contains "
f"{n_features} features."
)
is_categorical = [
tuple(categorical_features[fx] for fx in fxs) for fxs in features
]
elif categorical_features.dtype.kind in ("i", "O", "U"):
# categorical features provided as a list of indices or feature names
categorical_features_idx = [
_get_feature_index(cat, feature_names=feature_names)
for cat in categorical_features
]
is_categorical = [
tuple([idx in categorical_features_idx for idx in fxs])
for fxs in features
]
else:
raise ValueError(
"Expected `categorical_features` to be an array-like of boolean,"
f" integer, or string. Got {categorical_features.dtype} instead."
)
for cats in is_categorical:
if np.size(cats) == 2 and (cats[0] != cats[1]):
raise ValueError(
"Two-way partial dependence plots are not supported for pairs"
" of continuous and categorical features."
)
# collect the indices of the categorical features targeted by the partial
# dependence computation
categorical_features_targeted = set(
[
fx
for fxs, cats in zip(features, is_categorical)
for fx in fxs
if any(cats)
]
)
if categorical_features_targeted:
min_n_cats = min(
[
len(_unique(_safe_indexing(X, idx, axis=1)))
for idx in categorical_features_targeted
]
)
if grid_resolution < min_n_cats:
raise ValueError(
"The resolution of the computed grid is less than the "
"minimum number of categories in the targeted categorical "
"features. Expect the `grid_resolution` to be greater than "
f"{min_n_cats}. Got {grid_resolution} instead."
)
for is_cat, kind_plot in zip(is_categorical, kind_):
if any(is_cat) and kind_plot != "average":
raise ValueError(
"It is not possible to display individual effects for"
" categorical features."
)
# Early exit if `ax` does not contain the expected number of axes
if ax is not None and not isinstance(ax, plt.Axes):
axes = np.asarray(ax, dtype=object)
if axes.size != len(features):
raise ValueError(
"Expected ax to have {} axes, got {}".format(
len(features), axes.size
)
)
for i in chain.from_iterable(features):
if i >= len(feature_names):
raise ValueError(
"All entries of features must be less than "
"len(feature_names) = {0}, got {1}.".format(len(feature_names), i)
)
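# Validate `subsample`: an integer must be a positive count of ICE lines to
# keep, while a float must be a proportion strictly within (0, 1); any other
# value (e.g. `None`) passes through unchecked.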
if isinstance(subsample, numbers.Integral):
if subsample <= 0:
raise ValueError(
f"When an integer, subsample={subsample} should be positive."
)
elif isinstance(subsample, numbers.Real):
if subsample <= 0 or subsample >= 1:
raise ValueError(
f"When a floating-point, subsample={subsample} should be in "
"the (0, 1) range."
)
# compute predictions and/or averaged predictions
pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(
estimator,
X,
fxs,
sample_weight=sample_weight,
feature_names=feature_names,
categorical_features=categorical_features,
response_method=response_method,
method=method,
grid_resolution=grid_resolution,
percentiles=percentiles,
kind=kind_plot,
)
for kind_plot, fxs in zip(kind_, features)
)
# For multioutput regression, we can only check the validity of target
# now that we have the predictions.
# Also note: as multiclass-multioutput classifiers are not supported,
# multiclass and multioutput scenario are mutually exclusive. So there is
# no risk of overwriting target_idx here.
pd_result = pd_results[0] # checking the first result is enough
n_tasks = (
pd_result.average.shape[0]
if kind_[0] == "average"
else pd_result.individual.shape[0]
)
if is_regressor(estimator) and n_tasks > 1:
if target is None:
raise ValueError("target must be specified for multi-output regressors")
if not 0 <= target <= n_tasks:
raise ValueError(
"target must be in [0, n_tasks], got {}.".format(target)
)
target_idx = target
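# Compute the deciles (0.1, ..., 0.9 quantiles) of every continuous target
# feature; they are drawn later as small tick marks on the plot axes.
# Categorical features are skipped.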
deciles = {}
for fxs, cats in zip(features, is_categorical):
for fx, cat in zip(fxs, cats):
if not cat and fx not in deciles:
X_col = _safe_indexing(X, fx, axis=1)
deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1))
display = cls(
pd_results=pd_results,
features=features,
feature_names=feature_names,
target_idx=target_idx,
deciles=deciles,
kind=kind,
subsample=subsample,
random_state=random_state,
is_categorical=is_categorical,
)
return display.plot(
ax=ax,
n_cols=n_cols,
line_kw=line_kw,
ice_lines_kw=ice_lines_kw,
pd_line_kw=pd_line_kw,
contour_kw=contour_kw,
centered=centered,
)
def _get_sample_count(self, n_samples):
"""Compute the number of samples as an integer."""
if isinstance(self.subsample, numbers.Integral):
if self.subsample < n_samples:
return self.subsample
return n_samples
elif isinstance(self.subsample, numbers.Real):
return ceil(n_samples * self.subsample)
return n_samples
def _plot_ice_lines(
self,
preds,
feature_values,
n_ice_to_plot,
ax,
pd_plot_idx,
n_total_lines_by_plot,
individual_line_kw,
):
"""Plot the ICE lines.
Parameters
----------
preds : ndarray of shape \
(n_instances, n_grid_points)
The predictions computed for all points of `feature_values` for a
given feature for all samples in `X`.
feature_values : ndarray of shape (n_grid_points,)
The feature values for which the predictions have been computed.
n_ice_to_plot : int
The number of ICE lines to plot.
ax : Matplotlib axes
The axis on which to plot the ICE lines.
pd_plot_idx : int
The sequential index of the plot. It will be unraveled to find the
matching 2D position in the grid layout.
n_total_lines_by_plot : int
The total number of lines expected to be plotted on the axis.
individual_line_kw : dict
Dict with keywords passed when plotting the ICE lines.
"""
rng = check_random_state(self.random_state)
# subsample ice
ice_lines_idx = rng.choice(
preds.shape[0],
n_ice_to_plot,
replace=False,
)
ice_lines_subsampled = preds[ice_lines_idx, :]
# plot the subsampled ice
for ice_idx, ice in enumerate(ice_lines_subsampled):
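# Translate this line's flat position (plot offset plus line offset) into
# the 2D index used to store the artist in `self.lines_`.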
line_idx = np.unravel_index(
pd_plot_idx * n_total_lines_by_plot + ice_idx, self.lines_.shape
)
self.lines_[line_idx] = ax.plot(
feature_values, ice.ravel(), **individual_line_kw
)[0]
def _plot_average_dependence(
self,
avg_preds,
feature_values,
ax,
pd_line_idx,
line_kw,
categorical,
bar_kw,
):
"""Plot the average partial dependence.
Parameters
----------
avg_preds : ndarray of shape (n_grid_points,)
The average predictions for all points of `feature_values` for a
given feature for all samples in `X`.
feature_values : ndarray of shape (n_grid_points,)
The feature values for which the predictions have been computed.
ax : Matplotlib axes
The axis on which to plot the average PD.
pd_line_idx : int
The sequential index of the plot. It will be unraveled to find the
matching 2D position in the grid layout.
line_kw : dict
Dict with keywords passed when plotting the PD plot.
categorical : bool
Whether feature is categorical.
bar_kw : dict
Dict with keywords passed when plotting the PD bars (categorical).
"""
if categorical:
bar_idx = np.unravel_index(pd_line_idx, self.bars_.shape)
self.bars_[bar_idx] = ax.bar(feature_values, avg_preds, **bar_kw)[0]
ax.tick_params(axis="x", rotation=90)
else:
line_idx = np.unravel_index(pd_line_idx, self.lines_.shape)
self.lines_[line_idx] = ax.plot(
feature_values,
avg_preds,
**line_kw,
)[0]
def _plot_one_way_partial_dependence(
self,
kind,
preds,
avg_preds,
feature_values,
feature_idx,
n_ice_lines,
ax,
n_cols,
pd_plot_idx,
n_lines,
ice_lines_kw,
pd_line_kw,
categorical,
bar_kw,
pdp_lim,
):
"""Plot 1-way partial dependence: ICE and PDP.
Parameters
----------
kind : str
The kind of partial plot to draw.
preds : ndarray of shape \
(n_instances, n_grid_points) or None
The predictions computed for all points of `feature_values` for a
given feature for all samples in `X`.
avg_preds : ndarray of shape (n_grid_points,)
The average predictions for all points of `feature_values` for a
given feature for all samples in `X`.
feature_values : ndarray of shape (n_grid_points,)
The feature values for which the predictions have been computed.
feature_idx : int
The index corresponding to the target feature.
n_ice_lines : int
The number of ICE lines to plot.
ax : Matplotlib axes
The axis on which to plot the ICE and PDP lines.
n_cols : int or None
The number of columns in the axis.
pd_plot_idx : int
The sequential index of the plot. It will be unraveled to find the
matching 2D position in the grid layout.
n_lines : int
The total number of lines expected to be plotted on the axis.
ice_lines_kw : dict
Dict with keywords passed when plotting the ICE lines.
pd_line_kw : dict
Dict with keywords passed when plotting the PD plot.
categorical : bool
Whether feature is categorical.
bar_kw : dict
Dict with keywords passed when plotting the PD bars (categorical).
pdp_lim : dict
Global min and max average predictions, such that all plots will
have the same scale and y limits. `pdp_lim[1]` is the global min
and max for single partial dependence curves.
"""
from matplotlib import transforms # noqa
if kind in ("individual", "both"):
self._plot_ice_lines(
preds[self.target_idx],
feature_values,
n_ice_lines,
ax,
pd_plot_idx,
n_lines,
ice_lines_kw,
)
if kind in ("average", "both"):
# the average is stored as the last line
if kind == "average":
pd_line_idx = pd_plot_idx
else:
pd_line_idx = pd_plot_idx * n_lines + n_ice_lines
self._plot_average_dependence(
avg_preds[self.target_idx].ravel(),
feature_values,
ax,
pd_line_idx,
pd_line_kw,
categorical,
bar_kw,
)
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
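# `trans` uses data coordinates on x and axes coordinates on y, so the
# decile ticks drawn below always span the bottom 5% of the axis height.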
# create the decile line for the vertical axis
vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape)
if self.deciles.get(feature_idx[0], None) is not None:
self.deciles_vlines_[vlines_idx] = ax.vlines(
self.deciles[feature_idx[0]],
0,
0.05,
transform=trans,
color="k",
)
# reset ylim which was overwritten by vlines
min_val = min(val[0] for val in pdp_lim.values())
max_val = max(val[1] for val in pdp_lim.values())
ax.set_ylim([min_val, max_val])
# Set xlabel if it is not already set
if not ax.get_xlabel():
ax.set_xlabel(self.feature_names[feature_idx[0]])
if n_cols is None or pd_plot_idx % n_cols == 0:
if not ax.get_ylabel():
ax.set_ylabel("Partial dependence")
else:
ax.set_yticklabels([])
if pd_line_kw.get("label", None) and kind != "individual" and not categorical:
ax.legend()
def _plot_two_way_partial_dependence(
self,
avg_preds,
feature_values,
feature_idx,
ax,
pd_plot_idx,
Z_level,
contour_kw,
categorical,
heatmap_kw,
):
"""Plot 2-way partial dependence.