File: test_warm_start.py

package: scikit-learn 1.4.2+dfsg-8
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal

from sklearn.base import clone
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import (
    HistGradientBoostingClassifier,
    HistGradientBoostingRegressor,
)
from sklearn.metrics import check_scoring

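# Toy datasets shared by all the tests below.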
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)


def _assert_predictor_equal(gb_1, gb_2, X):
    """Assert that two HistGBM instances are identical."""
    # Check identical nodes for each tree
    for pred_ith_1, pred_ith_2 in zip(gb_1._predictors, gb_2._predictors):
        for predictor_1, predictor_2 in zip(pred_ith_1, pred_ith_2):
            assert_array_equal(predictor_1.nodes, predictor_2.nodes)

    # Check identical predictions
    assert_allclose(gb_1.predict(X), gb_2.predict(X))


@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_max_iter_with_warm_start_validation(GradientBoosting, X, y):
    # Check that a ValueError is raised when the maximum number of iterations
    # is smaller than the number of iterations from the previous fit when warm
    # start is True.
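    # Warm starting can only add iterations on top of the existing ones, so
    # shrinking max_iter below the already fitted n_iter_ cannot be honored.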

    estimator = GradientBoosting(max_iter=10, early_stopping=False, warm_start=True)
    estimator.fit(X, y)
    estimator.set_params(max_iter=5)
    err_msg = (
        "max_iter=5 must be larger than or equal to n_iter_=10 when warm_start==True"
    )
    with pytest.raises(ValueError, match=err_msg):
        estimator.fit(X, y)


@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_warm_start_yields_identical_results(GradientBoosting, X, y):
    # Make sure that fitting 50 iterations and then 25 with warm start is
    # equivalent to fitting 75 iterations.
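    # n_iter_no_change is set above max_iter so that early stopping can never
    # trigger: both estimators must run for their full number of iterations.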

    rng = 42
    gb_warm_start = GradientBoosting(
        n_iter_no_change=100, max_iter=50, random_state=rng, warm_start=True
    )
    gb_warm_start.fit(X, y).set_params(max_iter=75).fit(X, y)

    gb_no_warm_start = GradientBoosting(
        n_iter_no_change=100, max_iter=75, random_state=rng, warm_start=False
    )
    gb_no_warm_start.fit(X, y)

    # Check that both predictors are equal
    _assert_predictor_equal(gb_warm_start, gb_no_warm_start, X)


@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_warm_start_max_depth(GradientBoosting, X, y):
    # Test that trees of different depths can coexist in the same ensemble:
    # warm starting keeps the first trees and grows the new ones with the
    # updated max_depth.
    gb = GradientBoosting(
        max_iter=20,
        min_samples_leaf=1,
        warm_start=True,
        max_depth=2,
        early_stopping=False,
    )
    gb.fit(X, y)
    gb.set_params(max_iter=30, max_depth=3, n_iter_no_change=110)
    gb.fit(X, y)

    # First 20 trees have max_depth == 2
    for i in range(20):
        assert gb._predictors[i][0].get_max_depth() == 2
    # Last 10 trees have max_depth == 3
    for i in range(1, 11):
        assert gb._predictors[-i][0].get_max_depth() == 3


@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
@pytest.mark.parametrize("scoring", (None, "loss"))
def test_warm_start_early_stopping(GradientBoosting, X, y, scoring):
    # Make sure that early stopping occurs after a small number of iterations
    # when fitting a second time with warm starting.
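    # The first fit stops once the score has not improved for n_iter_no_change
    # iterations. The warm-started refit resumes from that converged model, so
    # it should add only a few trees before early stopping kicks in again.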

    n_iter_no_change = 5
    gb = GradientBoosting(
        n_iter_no_change=n_iter_no_change,
        max_iter=10000,
        early_stopping=True,
        random_state=42,
        warm_start=True,
        tol=1e-3,
        scoring=scoring,
    )
    gb.fit(X, y)
    n_iter_first_fit = gb.n_iter_
    gb.fit(X, y)
    n_iter_second_fit = gb.n_iter_
    assert 0 < n_iter_second_fit - n_iter_first_fit < n_iter_no_change


@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_warm_start_equal_n_estimators(GradientBoosting, X, y):
    # Test that warm starting with an unchanged max_iter is a no-op: no new
    # trees are added and the fitted model stays identical.
    gb_1 = GradientBoosting(max_depth=2, early_stopping=False)
    gb_1.fit(X, y)

    gb_2 = clone(gb_1)
    gb_2.set_params(max_iter=gb_1.max_iter, warm_start=True, n_iter_no_change=5)
    gb_2.fit(X, y)

    # Check that both predictors are equal
    _assert_predictor_equal(gb_1, gb_2, X)


@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_warm_start_clear(GradientBoosting, X, y):
    # Test that fitting with warm_start=False clears the previously fitted state.
    gb_1 = GradientBoosting(n_iter_no_change=5, random_state=42)
    gb_1.fit(X, y)

    gb_2 = GradientBoosting(n_iter_no_change=5, random_state=42, warm_start=True)
    gb_2.fit(X, y)  # inits state
    gb_2.set_params(warm_start=False)
    gb_2.fit(X, y)  # clears the old state; the result should match gb_1

    # Check that both predictors have the same train_score_ and
    # validation_score_ attributes
    assert_allclose(gb_1.train_score_, gb_2.train_score_)
    assert_allclose(gb_1.validation_score_, gb_2.validation_score_)

    # Check that both predictors are equal
    _assert_predictor_equal(gb_1, gb_2, X)


@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
@pytest.mark.parametrize("rng_type", ("none", "int", "instance"))
def test_random_seeds_warm_start(GradientBoosting, X, y, rng_type):
    # Make sure the seeds for train/val split and small trainset subsampling
    # are correctly set in a warm start context.
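    # `_random_seed` is a private attribute: the seed drawn from random_state at
    # the beginning of fit. With warm_start=True it must be reused across fits so
    # that the train/validation split (and any subsampling) stays consistent.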
    def _get_rng(rng_type):
        # Helper that returns a fresh random state on each call, so gb_1 and
        # gb_2 below start from identical RNG streams.
        if rng_type == "none":
            return None
        elif rng_type == "int":
            return 42
        else:
            return np.random.RandomState(0)

    random_state = _get_rng(rng_type)
    gb_1 = GradientBoosting(early_stopping=True, max_iter=2, random_state=random_state)
    gb_1.set_params(scoring=check_scoring(gb_1))
    gb_1.fit(X, y)
    random_seed_1_1 = gb_1._random_seed

    gb_1.fit(X, y)  # no warm start: the old state is cleared and the seed is drawn again
    random_seed_1_2 = gb_1._random_seed

    random_state = _get_rng(rng_type)
    gb_2 = GradientBoosting(
        early_stopping=True, max_iter=2, random_state=random_state, warm_start=True
    )
    gb_2.set_params(scoring=check_scoring(gb_2))
    gb_2.fit(X, y)  # first fit: draws and stores the seed
    random_seed_2_1 = gb_2._random_seed
    gb_2.fit(X, y)  # warm-started refit: must reuse the same seed
    random_seed_2_2 = gb_2._random_seed

    # Without warm starting, the seeds should be
    # * all different if random_state is None
    # * all equal if random_state is an integer
    # * if random_state is a RandomState instance: equal between the two fresh
    #   estimators but different when refitting, because fitting consumes
    #   (mutates) the instance
    if rng_type == "none":
        assert random_seed_1_1 != random_seed_1_2 != random_seed_2_1
    elif rng_type == "int":
        assert random_seed_1_1 == random_seed_1_2 == random_seed_2_1
    else:
        assert random_seed_1_1 == random_seed_2_1 != random_seed_1_2

    # With warm starting, the seeds must be equal
    assert random_seed_2_1 == random_seed_2_2