# gradient_boosting.py (forked from scikit-learn/scikit-learn)
"""Fast Gradient Boosting decision trees for classification and regression."""
# Author: Nicolas Hug
from abc import ABC, abstractmethod
from functools import partial
import warnings
import numpy as np
from timeit import default_timer as time
from ..._loss.loss import (
_LOSSES,
BaseLoss,
AbsoluteError,
HalfBinomialLoss,
HalfMultinomialLoss,
HalfPoissonLoss,
HalfSquaredError,
PinballLoss,
)
from ...base import BaseEstimator, RegressorMixin, ClassifierMixin, is_classifier
from ...utils import check_random_state, resample
from ...utils.validation import (
check_is_fitted,
check_consistent_length,
_check_sample_weight,
)
from ...utils._openmp_helpers import _openmp_effective_n_threads
from ...utils.multiclass import check_classification_targets
from ...metrics import check_scoring
from ...model_selection import train_test_split
from ...preprocessing import LabelEncoder
from ._gradient_boosting import _update_raw_predictions
from .common import Y_DTYPE, X_DTYPE, G_H_DTYPE
from .binning import _BinMapper
from .grower import TreeGrower


_LOSSES = _LOSSES.copy()
# TODO(1.2): Remove "least_squares" and "least_absolute_deviation"
# TODO(1.3): Remove "binary_crossentropy" and "categorical_crossentropy"
_LOSSES.update(
{
"least_squares": HalfSquaredError,
"least_absolute_deviation": AbsoluteError,
"poisson": HalfPoissonLoss,
"quantile": PinballLoss,
"binary_crossentropy": HalfBinomialLoss,
"categorical_crossentropy": HalfMultinomialLoss,
}
)


def _update_leaves_values(loss, grower, y_true, raw_prediction, sample_weight):
"""Update the leaf values to be predicted by the tree.
Update equals:
loss.fit_intercept_only(y_true - raw_prediction)
This is only applied if loss.need_update_leaves_values is True.
    Note: It only works if the loss is a function of the residual, as is the
    case for AbsoluteError and PinballLoss. Otherwise, one would need to
    minimize loss(y_true, raw_prediction + x) over x. A few examples:
- AbsoluteError: median(y_true - raw_prediction).
- PinballLoss: quantile(y_true - raw_prediction).
See also notes about need_update_leaves_values in BaseLoss.
"""
# TODO: Ideally this should be computed in parallel over the leaves using something
# similar to _update_raw_predictions(), but this requires a cython version of
# median().
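    # Illustrative example (comment only, hedged): with AbsoluteError,
    # fit_intercept_only computes the (weighted) median of the residuals, so a
    # leaf whose residuals y_true - raw_prediction are [1.0, 2.0, 10.0] would
    # get value = shrinkage * 2.0; with PinballLoss(quantile=q) it would be
    # the q-quantile of those residuals instead.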
for leaf in grower.finalized_leaves:
indices = leaf.sample_indices
if sample_weight is None:
sw = None
else:
sw = sample_weight[indices]
update = loss.fit_intercept_only(
y_true=y_true[indices] - raw_prediction[indices],
sample_weight=sw,
)
leaf.value = grower.shrinkage * update
# Note that the regularization is ignored here


class BaseHistGradientBoosting(BaseEstimator, ABC):
"""Base class for histogram-based gradient boosting estimators."""
@abstractmethod
def __init__(
self,
loss,
*,
learning_rate,
max_iter,
max_leaf_nodes,
max_depth,
min_samples_leaf,
l2_regularization,
max_bins,
categorical_features,
monotonic_cst,
warm_start,
early_stopping,
scoring,
validation_fraction,
n_iter_no_change,
tol,
verbose,
random_state,
):
self.loss = loss
self.learning_rate = learning_rate
self.max_iter = max_iter
self.max_leaf_nodes = max_leaf_nodes
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.l2_regularization = l2_regularization
self.max_bins = max_bins
self.monotonic_cst = monotonic_cst
self.categorical_features = categorical_features
self.warm_start = warm_start
self.early_stopping = early_stopping
self.scoring = scoring
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.tol = tol
self.verbose = verbose
self.random_state = random_state
def _validate_parameters(self):
"""Validate parameters passed to __init__.
The parameters that are directly passed to the grower are checked in
TreeGrower."""
if self.loss not in self._VALID_LOSSES and not isinstance(self.loss, BaseLoss):
raise ValueError(
"Loss {} is not supported for {}. Accepted losses: {}.".format(
self.loss, self.__class__.__name__, ", ".join(self._VALID_LOSSES)
)
)
if self.learning_rate <= 0:
raise ValueError(
"learning_rate={} must be strictly positive".format(self.learning_rate)
)
if self.max_iter < 1:
raise ValueError(
"max_iter={} must not be smaller than 1.".format(self.max_iter)
)
if self.n_iter_no_change < 0:
raise ValueError(
"n_iter_no_change={} must be positive.".format(self.n_iter_no_change)
)
if self.validation_fraction is not None and self.validation_fraction <= 0:
raise ValueError(
"validation_fraction={} must be strictly positive, or None.".format(
self.validation_fraction
)
)
if self.tol < 0:
raise ValueError("tol={} must not be smaller than 0.".format(self.tol))
if not (2 <= self.max_bins <= 255):
raise ValueError(
"max_bins={} should be no smaller than 2 "
"and no larger than 255.".format(self.max_bins)
)
if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:
raise ValueError(
"monotonic constraints are not supported for multiclass classification."
)
def _check_categories(self, X):
"""Check and validate categorical features in X
Return
------
is_categorical : ndarray of shape (n_features,) or None, dtype=bool
Indicates whether a feature is categorical. If no feature is
categorical, this is None.
known_categories : list of size n_features or None
The list contains, for each feature:
- an array of shape (n_categories,) with the unique cat values
- None if the feature is not categorical
None if no feature is categorical.
"""
if self.categorical_features is None:
return None, None
categorical_features = np.asarray(self.categorical_features)
if categorical_features.size == 0:
return None, None
if categorical_features.dtype.kind not in ("i", "b"):
raise ValueError(
"categorical_features must be an array-like of "
"bools or array-like of ints."
)
n_features = X.shape[1]
# check for categorical features as indices
if categorical_features.dtype.kind == "i":
if (
np.max(categorical_features) >= n_features
or np.min(categorical_features) < 0
):
raise ValueError(
"categorical_features set as integer "
"indices must be in [0, n_features - 1]"
)
is_categorical = np.zeros(n_features, dtype=bool)
is_categorical[categorical_features] = True
else:
if categorical_features.shape[0] != n_features:
raise ValueError(
"categorical_features set as a boolean mask "
"must have shape (n_features,), got: "
f"{categorical_features.shape}"
)
is_categorical = categorical_features
if not np.any(is_categorical):
return None, None
# compute the known categories in the training data. We need to do
# that here instead of in the BinMapper because in case of early
# stopping, the mapper only gets a fraction of the training data.
known_categories = []
for f_idx in range(n_features):
if is_categorical[f_idx]:
categories = np.unique(X[:, f_idx])
missing = np.isnan(categories)
if missing.any():
categories = categories[~missing]
if categories.size > self.max_bins:
raise ValueError(
f"Categorical feature at index {f_idx} is "
"expected to have a "
f"cardinality <= {self.max_bins}"
)
if (categories >= self.max_bins).any():
raise ValueError(
f"Categorical feature at index {f_idx} is "
"expected to be encoded with "
f"values < {self.max_bins}"
)
else:
categories = None
known_categories.append(categories)
return is_categorical, known_categories
def fit(self, X, y, sample_weight=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,)
Target values.
        sample_weight : array-like of shape (n_samples,), default=None
Weights of training data.
.. versionadded:: 0.23
Returns
-------
self : object
Fitted estimator.
"""
fit_start_time = time()
acc_find_split_time = 0.0 # time spent finding the best splits
acc_apply_split_time = 0.0 # time spent splitting nodes
acc_compute_hist_time = 0.0 # time spent computing histograms
# time spent predicting X for gradient and hessians update
acc_prediction_time = 0.0
X, y = self._validate_data(X, y, dtype=[X_DTYPE], force_all_finite=False)
y = self._encode_y(y)
check_consistent_length(X, y)
# Do not create unit sample weights by default to later skip some
# computation
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
# TODO: remove when PDP supports sample weights
self._fitted_with_sw = True
rng = check_random_state(self.random_state)
# When warm starting, we want to re-use the same seed that was used
# the first time fit was called (e.g. for subsampling or for the
# train/val split).
if not (self.warm_start and self._is_fitted()):
self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
self._validate_parameters()
# used for validation in predict
n_samples, self._n_features = X.shape
self.is_categorical_, known_categories = self._check_categories(X)
# we need this stateful variable to tell raw_predict() that it was
# called from fit() (this current method), and that the data it has
# received is pre-binned.
# predicting is faster on pre-binned data, so we want early stopping
# predictions to be made on pre-binned data. Unfortunately the _scorer
# can only call predict() or predict_proba(), not raw_predict(), and
# there's no way to tell the scorer that it needs to predict binned
# data.
self._in_fit = True
        # `_openmp_effective_n_threads` is used to take cgroups CPU quotas
        # into account when determining the maximum number of threads to use.
n_threads = _openmp_effective_n_threads()
if isinstance(self.loss, str):
self._loss = self._get_loss(sample_weight=sample_weight)
elif isinstance(self.loss, BaseLoss):
self._loss = self.loss
if self.early_stopping == "auto":
self.do_early_stopping_ = n_samples > 10000
else:
self.do_early_stopping_ = self.early_stopping
# create validation data if needed
self._use_validation_data = self.validation_fraction is not None
if self.do_early_stopping_ and self._use_validation_data:
# stratify for classification
# instead of checking predict_proba, loss.n_classes >= 2 would also work
stratify = y if hasattr(self._loss, "predict_proba") else None
# Save the state of the RNG for the training and validation split.
# This is needed in order to have the same split when using
# warm starting.
if sample_weight is None:
X_train, X_val, y_train, y_val = train_test_split(
X,
y,
test_size=self.validation_fraction,
stratify=stratify,
random_state=self._random_seed,
)
sample_weight_train = sample_weight_val = None
else:
# TODO: incorporate sample_weight in sampling here, as well as
# stratify
(
X_train,
X_val,
y_train,
y_val,
sample_weight_train,
sample_weight_val,
) = train_test_split(
X,
y,
sample_weight,
test_size=self.validation_fraction,
stratify=stratify,
random_state=self._random_seed,
)
else:
X_train, y_train, sample_weight_train = X, y, sample_weight
X_val = y_val = sample_weight_val = None
# Bin the data
# For ease of use of the API, the user-facing GBDT classes accept the
# parameter max_bins, which doesn't take into account the bin for
# missing values (which is always allocated). However, since max_bins
# isn't the true maximal number of bins, all other private classes
# (binmapper, histbuilder...) accept n_bins instead, which is the
# actual total number of bins. Everywhere in the code, the
# convention is that n_bins == max_bins + 1
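        # For instance, the default max_bins=255 yields n_bins=256.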
n_bins = self.max_bins + 1 # + 1 for missing values
self._bin_mapper = _BinMapper(
n_bins=n_bins,
is_categorical=self.is_categorical_,
known_categories=known_categories,
random_state=self._random_seed,
n_threads=n_threads,
)
X_binned_train = self._bin_data(X_train, is_training_data=True)
if X_val is not None:
X_binned_val = self._bin_data(X_val, is_training_data=False)
else:
X_binned_val = None
# Uses binned data to check for missing values
has_missing_values = (
(X_binned_train == self._bin_mapper.missing_values_bin_idx_)
.any(axis=0)
.astype(np.uint8)
)
if self.verbose:
print("Fitting gradient boosted rounds:")
n_samples = X_binned_train.shape[0]
# First time calling fit, or no warm start
if not (self._is_fitted() and self.warm_start):
# Clear random state and score attributes
self._clear_state()
# initialize raw_predictions: those are the accumulated values
# predicted by the trees for the training data. raw_predictions has
# shape (n_samples, n_trees_per_iteration) where
# n_trees_per_iterations is n_classes in multiclass classification,
# else 1.
# self._baseline_prediction has shape (1, n_trees_per_iteration)
self._baseline_prediction = self._loss.fit_intercept_only(
y_true=y_train, sample_weight=sample_weight_train
).reshape((1, -1))
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions += self._baseline_prediction
# predictors is a matrix (list of lists) of TreePredictor objects
# with shape (n_iter_, n_trees_per_iteration)
self._predictors = predictors = []
# Initialize structures and attributes related to early stopping
self._scorer = None # set if scoring != loss
raw_predictions_val = None # set if scoring == loss and use val
self.train_score_ = []
self.validation_score_ = []
if self.do_early_stopping_:
# populate train_score and validation_score with the
# predictions of the initial model (before the first tree)
if self.scoring == "loss":
# we're going to compute scoring w.r.t the loss. As losses
# take raw predictions as input (unlike the scorers), we
# can optimize a bit and avoid repeating computing the
# predictions of the previous trees. We'll re-use
# raw_predictions (as it's needed for training anyway) for
# evaluating the training loss, and create
# raw_predictions_val for storing the raw predictions of
# the validation data.
if self._use_validation_data:
raw_predictions_val = np.zeros(
shape=(X_binned_val.shape[0], self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions_val += self._baseline_prediction
self._check_early_stopping_loss(
raw_predictions=raw_predictions,
y_train=y_train,
sample_weight_train=sample_weight_train,
raw_predictions_val=raw_predictions_val,
y_val=y_val,
sample_weight_val=sample_weight_val,
n_threads=n_threads,
)
else:
self._scorer = check_scoring(self, self.scoring)
# _scorer is a callable with signature (est, X, y) and
# calls est.predict() or est.predict_proba() depending on
# its nature.
# Unfortunately, each call to _scorer() will compute
# the predictions of all the trees. So we use a subset of
# the training set to compute train scores.
# Compute the subsample set
(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
) = self._get_small_trainset(
X_binned_train, y_train, sample_weight_train, self._random_seed
)
self._check_early_stopping_scorer(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
X_binned_val,
y_val,
sample_weight_val,
)
begin_at_stage = 0
# warm start: this is not the first time fit was called
else:
# Check that the maximum number of iterations is not smaller
# than the number of iterations from the previous fit
if self.max_iter < self.n_iter_:
raise ValueError(
"max_iter=%d must be larger than or equal to "
"n_iter_=%d when warm_start==True" % (self.max_iter, self.n_iter_)
)
# Convert array attributes to lists
self.train_score_ = self.train_score_.tolist()
self.validation_score_ = self.validation_score_.tolist()
# Compute raw predictions
raw_predictions = self._raw_predict(X_binned_train, n_threads=n_threads)
if self.do_early_stopping_ and self._use_validation_data:
raw_predictions_val = self._raw_predict(
X_binned_val, n_threads=n_threads
)
else:
raw_predictions_val = None
if self.do_early_stopping_ and self.scoring != "loss":
# Compute the subsample set
(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
) = self._get_small_trainset(
X_binned_train, y_train, sample_weight_train, self._random_seed
)
# Get the predictors from the previous fit
predictors = self._predictors
begin_at_stage = self.n_iter_
# initialize gradients and hessians (empty arrays).
# shape = (n_samples, n_trees_per_iteration).
gradient, hessian = self._loss.init_gradient_and_hessian(
n_samples=n_samples, dtype=G_H_DTYPE, order="F"
)
for iteration in range(begin_at_stage, self.max_iter):
if self.verbose:
iteration_start_time = time()
print(
"[{}/{}] ".format(iteration + 1, self.max_iter), end="", flush=True
)
# Update gradients and hessians, inplace
# Note that self._loss expects shape (n_samples,) for
# n_trees_per_iteration = 1 else shape (n_samples, n_trees_per_iteration).
if self._loss.constant_hessian:
self._loss.gradient(
y_true=y_train,
raw_prediction=raw_predictions,
sample_weight=sample_weight_train,
gradient_out=gradient,
n_threads=n_threads,
)
else:
self._loss.gradient_hessian(
y_true=y_train,
raw_prediction=raw_predictions,
sample_weight=sample_weight_train,
gradient_out=gradient,
hessian_out=hessian,
n_threads=n_threads,
)
# Append a list since there may be more than 1 predictor per iter
predictors.append([])
# 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
# on gradient and hessian to simplify the loop over n_trees_per_iteration_.
if gradient.ndim == 1:
g_view = gradient.reshape((-1, 1))
h_view = hessian.reshape((-1, 1))
else:
g_view = gradient
h_view = hessian
# Build `n_trees_per_iteration` trees.
for k in range(self.n_trees_per_iteration_):
grower = TreeGrower(
X_binned=X_binned_train,
gradients=g_view[:, k],
hessians=h_view[:, k],
n_bins=n_bins,
n_bins_non_missing=self._bin_mapper.n_bins_non_missing_,
has_missing_values=has_missing_values,
is_categorical=self.is_categorical_,
monotonic_cst=self.monotonic_cst,
max_leaf_nodes=self.max_leaf_nodes,
max_depth=self.max_depth,
min_samples_leaf=self.min_samples_leaf,
l2_regularization=self.l2_regularization,
shrinkage=self.learning_rate,
n_threads=n_threads,
)
grower.grow()
acc_apply_split_time += grower.total_apply_split_time
acc_find_split_time += grower.total_find_split_time
acc_compute_hist_time += grower.total_compute_hist_time
if self._loss.need_update_leaves_values:
_update_leaves_values(
loss=self._loss,
grower=grower,
y_true=y_train,
raw_prediction=raw_predictions[:, k],
sample_weight=sample_weight_train,
)
predictor = grower.make_predictor(
binning_thresholds=self._bin_mapper.bin_thresholds_
)
predictors[-1].append(predictor)
# Update raw_predictions with the predictions of the newly
# created tree.
tic_pred = time()
_update_raw_predictions(raw_predictions[:, k], grower, n_threads)
toc_pred = time()
acc_prediction_time += toc_pred - tic_pred
should_early_stop = False
if self.do_early_stopping_:
if self.scoring == "loss":
# Update raw_predictions_val with the newest tree(s)
if self._use_validation_data:
for k, pred in enumerate(self._predictors[-1]):
raw_predictions_val[:, k] += pred.predict_binned(
X_binned_val,
self._bin_mapper.missing_values_bin_idx_,
n_threads,
)
should_early_stop = self._check_early_stopping_loss(
raw_predictions=raw_predictions,
y_train=y_train,
sample_weight_train=sample_weight_train,
raw_predictions_val=raw_predictions_val,
y_val=y_val,
sample_weight_val=sample_weight_val,
n_threads=n_threads,
)
else:
should_early_stop = self._check_early_stopping_scorer(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
X_binned_val,
y_val,
sample_weight_val,
)
if self.verbose:
self._print_iteration_stats(iteration_start_time)
# maybe we could also early stop if all the trees are stumps?
if should_early_stop:
break
if self.verbose:
duration = time() - fit_start_time
n_total_leaves = sum(
predictor.get_n_leaf_nodes()
for predictors_at_ith_iteration in self._predictors
for predictor in predictors_at_ith_iteration
)
n_predictors = sum(
len(predictors_at_ith_iteration)
for predictors_at_ith_iteration in self._predictors
)
print(
"Fit {} trees in {:.3f} s, ({} total leaves)".format(
n_predictors, duration, n_total_leaves
)
)
print(
"{:<32} {:.3f}s".format(
"Time spent computing histograms:", acc_compute_hist_time
)
)
print(
"{:<32} {:.3f}s".format(
"Time spent finding best splits:", acc_find_split_time
)
)
print(
"{:<32} {:.3f}s".format(
"Time spent applying splits:", acc_apply_split_time
)
)
print(
"{:<32} {:.3f}s".format("Time spent predicting:", acc_prediction_time)
)
self.train_score_ = np.asarray(self.train_score_)
self.validation_score_ = np.asarray(self.validation_score_)
del self._in_fit # hard delete so we're sure it can't be used anymore
return self
def _is_fitted(self):
return len(getattr(self, "_predictors", [])) > 0
def _clear_state(self):
"""Clear the state of the gradient boosting model."""
for var in ("train_score_", "validation_score_"):
if hasattr(self, var):
delattr(self, var)
def _get_small_trainset(self, X_binned_train, y_train, sample_weight_train, seed):
"""Compute the indices of the subsample set and return this set.
For efficiency, we need to subsample the training set to compute scores
with scorers.
"""
# TODO: incorporate sample_weights here in `resample`
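        # For example, a training set with 100,000 rows is scored on a
        # (stratified, for classifiers) subsample of 10,000 rows at each
        # early-stopping check.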
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(
indices,
n_samples=subsample_size,
replace=False,
random_state=seed,
stratify=stratify,
)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
return (X_binned_small_train, y_small_train, sample_weight_small_train)
else:
return X_binned_train, y_train, sample_weight_train
def _check_early_stopping_scorer(
self,
X_binned_small_train,
y_small_train,
sample_weight_small_train,
X_binned_val,
y_val,
sample_weight_val,
):
"""Check if fitting should be early-stopped based on scorer.
Scores are computed on validation data or on training data.
"""
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
if sample_weight_small_train is None:
self.train_score_.append(
self._scorer(self, X_binned_small_train, y_small_train)
)
else:
self.train_score_.append(
self._scorer(
self,
X_binned_small_train,
y_small_train,
sample_weight=sample_weight_small_train,
)
)
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
if sample_weight_val is None:
self.validation_score_.append(self._scorer(self, X_binned_val, y_val))
else:
self.validation_score_.append(
self._scorer(
self, X_binned_val, y_val, sample_weight=sample_weight_val
)
)
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
def _check_early_stopping_loss(
self,
raw_predictions,
y_train,
sample_weight_train,
raw_predictions_val,
y_val,
sample_weight_val,
n_threads=1,
):
"""Check if fitting should be early-stopped based on loss.
Scores are computed on validation data or on training data.
"""
self.train_score_.append(
-self._loss(
y_true=y_train,
raw_prediction=raw_predictions,
sample_weight=sample_weight_train,
n_threads=n_threads,
)
)
if self._use_validation_data:
self.validation_score_.append(
-self._loss(
y_true=y_val,
raw_prediction=raw_predictions_val,
sample_weight=sample_weight_val,
n_threads=n_threads,
)
)
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
def _should_stop(self, scores):
"""
        Return True (do early stopping) if the last n scores aren't better
        than the (n+1)th-to-last score, up to some tolerance.
"""
reference_position = self.n_iter_no_change + 1
if len(scores) < reference_position:
return False
# A higher score is always better. Higher tol means that it will be
# harder for subsequent iteration to be considered an improvement upon
# the reference score, and therefore it is more likely to early stop
# because of the lack of significant improvement.
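        # Worked example (illustrative): with n_iter_no_change=2 and tol=0.01,
        # scores = [-1.0, -0.5, -0.49, -0.495] give reference_score =
        # -0.5 + 0.01 = -0.49; neither of the last two scores is strictly
        # greater, so we return True and training stops.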
reference_score = scores[-reference_position] + self.tol
recent_scores = scores[-reference_position + 1 :]
recent_improvements = [score > reference_score for score in recent_scores]
return not any(recent_improvements)
def _bin_data(self, X, is_training_data):
"""Bin data X.
If is_training_data, then fit the _bin_mapper attribute.
Else, the binned data is converted to a C-contiguous array.
"""
description = "training" if is_training_data else "validation"
if self.verbose:
print(
"Binning {:.3f} GB of {} data: ".format(X.nbytes / 1e9, description),
end="",
flush=True,
)
tic = time()
if is_training_data:
X_binned = self._bin_mapper.fit_transform(X) # F-aligned array
else:
X_binned = self._bin_mapper.transform(X) # F-aligned array
# We convert the array to C-contiguous since predicting is faster
# with this layout (training is faster on F-arrays though)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print("{:.3f} s".format(duration))
return X_binned
def _print_iteration_stats(self, iteration_start_time):
"""Print info about the current fitting iteration."""
log_msg = ""
predictors_of_ith_iteration = [
predictors_list
for predictors_list in self._predictors[-1]
if predictors_list
]
n_trees = len(predictors_of_ith_iteration)
max_depth = max(
predictor.get_max_depth() for predictor in predictors_of_ith_iteration
)
n_leaves = sum(
predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration
)
if n_trees == 1:
log_msg += "{} tree, {} leaves, ".format(n_trees, n_leaves)
else:
log_msg += "{} trees, {} leaves ".format(n_trees, n_leaves)
log_msg += "({} on avg), ".format(int(n_leaves / n_trees))
log_msg += "max depth = {}, ".format(max_depth)
if self.do_early_stopping_:
if self.scoring == "loss":
factor = -1 # score_ arrays contain the negative loss
name = "loss"
else:
factor = 1
name = "score"
log_msg += "train {}: {:.5f}, ".format(name, factor * self.train_score_[-1])
if self._use_validation_data:
log_msg += "val {}: {:.5f}, ".format(
name, factor * self.validation_score_[-1]
)
iteration_time = time() - iteration_start_time
log_msg += "in {:0.3f}s".format(iteration_time)
print(log_msg)
def _raw_predict(self, X, n_threads=None):
"""Return the sum of the leaves values over all predictors.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
n_threads : int, default=None
            Number of OpenMP threads to use. `_openmp_effective_n_threads` is
            called to determine the effective number of threads to use, which
            takes cgroups CPU quotas into account. See the docstring of
            `_openmp_effective_n_threads` for details.
Returns
-------
raw_predictions : array, shape (n_samples, n_trees_per_iteration)
The raw predicted values.
"""
is_binned = getattr(self, "_in_fit", False)
if not is_binned:
X = self._validate_data(
X, dtype=X_DTYPE, force_all_finite=False, reset=False
)
check_is_fitted(self)
if X.shape[1] != self._n_features:
raise ValueError(
"X has {} features but this estimator was trained with "
"{} features.".format(X.shape[1], self._n_features)
)
n_samples = X.shape[0]
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions += self._baseline_prediction
# We intentionally decouple the number of threads used at prediction
# time from the number of threads used at fit time because the model
# can be deployed on a different machine for prediction purposes.
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(
X, self._predictors, raw_predictions, is_binned, n_threads
)
return raw_predictions
def _predict_iterations(self, X, predictors, raw_predictions, is_binned, n_threads):
"""Add the predictions of the predictors to raw_predictions."""
if not is_binned:
(
known_cat_bitsets,
f_idx_map,
) = self._bin_mapper.make_known_categories_bitsets()
for predictors_of_ith_iteration in predictors:
for k, predictor in enumerate(predictors_of_ith_iteration):
if is_binned:
predict = partial(
predictor.predict_binned,
missing_values_bin_idx=self._bin_mapper.missing_values_bin_idx_,
n_threads=n_threads,
)
else:
predict = partial(
predictor.predict,
known_cat_bitsets=known_cat_bitsets,
f_idx_map=f_idx_map,
n_threads=n_threads,
)
raw_predictions[:, k] += predict(X)
def _staged_raw_predict(self, X):
"""Compute raw predictions of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on the
        testing set) after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)