# coding: utf-8
import copy
import itertools
import json
import math
import pickle
import platform
import random
import re
from os import getenv
from pathlib import Path
from shutil import copyfile

import numpy as np
import psutil
import pytest
from scipy.sparse import csr_matrix, isspmatrix_csc, isspmatrix_csr
from sklearn.datasets import load_svmlight_file, make_blobs, make_classification, make_multilabel_classification
from sklearn.metrics import (
    average_precision_score,
    log_loss,
    mean_absolute_error,
    mean_squared_error,
    r2_score,
    roc_auc_score,
)
from sklearn.model_selection import GroupKFold, TimeSeriesSplit, train_test_split

import lightgbm as lgb
from lightgbm.compat import PANDAS_INSTALLED, pd_DataFrame, pd_Series

from .utils import (
    SERIALIZERS,
    assert_all_trees_valid,
    assert_silent,
    dummy_obj,
    load_breast_cancer,
    load_digits,
    load_iris,
    logistic_sigmoid,
    make_synthetic_regression,
    mse_obj,
    np_assert_array_equal,
    pickle_and_unpickle_object,
    sklearn_multiclass_custom_objective,
    softmax,
)

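# yields 0, -1, -2, ...; decreasing_metric below draws from it so the reported value
# strictly decreases (i.e. keeps "improving", since its is_higher_better flag is False)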
decreasing_generator = itertools.count(0, -1)


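# Custom binary objective: for a raw score z and label y, binary log-loss
# L = -(y * log(s) + (1 - y) * log(1 - s)) with s = sigmoid(z) has
# dL/dz = s - y and d2L/dz2 = s * (1 - s), which is exactly what this returns.
# For example, with preds = [0.0] and label = [1.0]: sigmoid(0.0) = 0.5,
# so grad = -0.5 and hess = 0.25.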
def logloss_obj(preds, train_data):
    y_true = train_data.get_label()
    y_pred = logistic_sigmoid(preds)
    grad = y_pred - y_true
    hess = y_pred * (1.0 - y_pred)
    return grad, hess


def multi_logloss(y_true, y_pred):
    return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)])


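# Top-k error: a row counts as correct only when the true class's score is strictly
# greater than the (k+1)-th largest score in that row. `-np.partition(-y_pred, k)`
# places the k largest scores first, so `[:, k:]` is everything outside the top k
# and its row-wise max is the (k+1)-th largest score.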
def top_k_error(y_true, y_pred, k):
    if k == y_pred.shape[1]:
        return 0
    max_rest = np.max(-np.partition(-y_pred, k)[:, k:], axis=1)
    return 1 - np.mean((y_pred[np.arange(len(y_true)), y_true] > max_rest))


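# The eval functions below follow lgb.train's feval contract: each takes
# (preds, train_data) and returns a (name, value, is_higher_better) tuple,
# or a list of such tuples to report several metrics at once.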
def constant_metric(preds, train_data):
    return ("error", 0.0, False)


def constant_metric_multi(preds, train_data):
    return [
        ("important_metric", 1.5, False),
        ("irrelevant_metric", 7.8, False),
    ]


def decreasing_metric(preds, train_data):
    return ("decreasing_metric", next(decreasing_generator), False)


def categorize(continuous_x):
    return np.digitize(continuous_x, bins=np.arange(0, 1, 0.01))


def test_binary():
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "objective": "binary",
        "metric": "binary_logloss",
        "verbose": -1,
        "num_iteration": 50,  # test num_iteration in dict here
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=20, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    ret = log_loss(y_test, gbm.predict(X_test))
    assert ret < 0.14
    assert len(evals_result["valid_0"]["binary_logloss"]) == 50
    assert evals_result["valid_0"]["binary_logloss"][-1] == pytest.approx(ret)


def test_rf():
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "boosting_type": "rf",
        "objective": "binary",
        "bagging_freq": 1,
        "bagging_fraction": 0.5,
        "feature_fraction": 0.5,
        "num_leaves": 50,
        "metric": "binary_logloss",
        "verbose": -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=50, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    ret = log_loss(y_test, gbm.predict(X_test))
    assert ret < 0.19
    assert evals_result["valid_0"]["binary_logloss"][-1] == pytest.approx(ret)


@pytest.mark.parametrize("objective", ["regression", "regression_l1", "huber", "fair", "poisson", "quantile"])
def test_regression(objective):
    X, y = make_synthetic_regression()
    y = np.abs(y)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {"objective": objective, "metric": "l2", "verbose": -1}
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=50, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    ret = mean_squared_error(y_test, gbm.predict(X_test))
    if objective == "huber":
        assert ret < 430
    elif objective == "fair":
        assert ret < 296
    elif objective == "poisson":
        assert ret < 193
    elif objective == "quantile":
        assert ret < 1311
    else:
        assert ret < 343
    assert evals_result["valid_0"]["l2"][-1] == pytest.approx(ret)


def test_missing_value_handle():
    X_train = np.zeros((100, 1))
    y_train = np.zeros(100)
    trues = random.sample(range(100), 20)
    for idx in trues:
        X_train[idx, 0] = np.nan
        y_train[idx] = 1
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)

    params = {"metric": "l2", "verbose": -1, "boost_from_average": False}
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=20, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    ret = mean_squared_error(y_train, gbm.predict(X_train))
    assert ret < 0.005
    assert evals_result["valid_0"]["l2"][-1] == pytest.approx(ret)


def test_missing_value_handle_more_na():
    X_train = np.ones((100, 1))
    y_train = np.ones(100)
    trues = random.sample(range(100), 80)
    for idx in trues:
        X_train[idx, 0] = np.nan
        y_train[idx] = 0
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)

    params = {"metric": "l2", "verbose": -1, "boost_from_average": False}
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=20, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    ret = mean_squared_error(y_train, gbm.predict(X_train))
    assert ret < 0.005
    assert evals_result["valid_0"]["l2"][-1] == pytest.approx(ret)


def test_missing_value_handle_na():
    x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
    y = [1, 1, 1, 1, 0, 0, 0, 0, 1]

    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)

    params = {
        "objective": "regression",
        "metric": "auc",
        "verbose": -1,
        "boost_from_average": False,
        "min_data": 1,
        "num_leaves": 2,
        "learning_rate": 1,
        "min_data_in_bin": 1,
        "zero_as_missing": False,
    }
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    pred = gbm.predict(X_train)
    np.testing.assert_allclose(pred, y)
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.999
    assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret)


def test_missing_value_handle_zero():
    x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
    y = [0, 1, 1, 1, 0, 0, 0, 0, 0]

    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)

    params = {
        "objective": "regression",
        "metric": "auc",
        "verbose": -1,
        "boost_from_average": False,
        "min_data": 1,
        "num_leaves": 2,
        "learning_rate": 1,
        "min_data_in_bin": 1,
        "zero_as_missing": True,
    }
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    pred = gbm.predict(X_train)
    np.testing.assert_allclose(pred, y)
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.999
    assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret)


def test_missing_value_handle_none():
    x = [0, 1, 2, 3, 4, 5, 6, 7, np.nan]
    y = [0, 1, 1, 1, 0, 0, 0, 0, 0]

    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)

    params = {
        "objective": "regression",
        "metric": "auc",
        "verbose": -1,
        "boost_from_average": False,
        "min_data": 1,
        "num_leaves": 2,
        "learning_rate": 1,
        "min_data_in_bin": 1,
        "use_missing": False,
    }
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    pred = gbm.predict(X_train)
    assert pred[0] == pytest.approx(pred[1])
    assert pred[-1] == pytest.approx(pred[0])
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.83
    assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret)


@pytest.mark.parametrize(
    "use_quantized_grad",
    [
        pytest.param(
            True,
            marks=pytest.mark.skipif(
                getenv("TASK", "") == "cuda",
                reason="Skip because quantized training with categorical features is not supported for cuda version",
            ),
        ),
        False,
    ],
)
def test_categorical_handle(use_quantized_grad):
    x = [0, 1, 2, 3, 4, 5, 6, 7]
    y = [0, 1, 0, 1, 0, 1, 0, 1]

    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)

    params = {
        "objective": "regression",
        "metric": "auc",
        "verbose": -1,
        "boost_from_average": False,
        "min_data": 1,
        "num_leaves": 2,
        "learning_rate": 1,
        "min_data_in_bin": 1,
        "min_data_per_group": 1,
        "cat_smooth": 1,
        "cat_l2": 0,
        "max_cat_to_onehot": 1,
        "zero_as_missing": True,
        "categorical_column": 0,
        "use_quantized_grad": use_quantized_grad,
    }
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    pred = gbm.predict(X_train)
    np.testing.assert_allclose(pred, y)
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.999
    assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret)


@pytest.mark.parametrize(
    "use_quantized_grad",
    [
        pytest.param(
            True,
            marks=pytest.mark.skipif(
                getenv("TASK", "") == "cuda",
                reason="Skip because quantized training with categorical features is not supported for cuda version",
            ),
        ),
        False,
    ],
)
def test_categorical_handle_na(use_quantized_grad):
    x = [0, np.nan, 0, np.nan, 0, np.nan]
    y = [0, 1, 0, 1, 0, 1]

    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)

    params = {
        "objective": "regression",
        "metric": "auc",
        "verbose": -1,
        "boost_from_average": False,
        "min_data": 1,
        "num_leaves": 2,
        "learning_rate": 1,
        "min_data_in_bin": 1,
        "min_data_per_group": 1,
        "cat_smooth": 1,
        "cat_l2": 0,
        "max_cat_to_onehot": 1,
        "zero_as_missing": False,
        "categorical_column": 0,
        "use_quantized_grad": use_quantized_grad,
    }
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    pred = gbm.predict(X_train)
    np.testing.assert_allclose(pred, y)
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.999
    assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret)


@pytest.mark.parametrize(
    "use_quantized_grad",
    [
        pytest.param(
            True,
            marks=pytest.mark.skipif(
                getenv("TASK", "") == "cuda",
                reason="Skip because quantized training with categorical features is not supported for cuda version",
            ),
        ),
        False,
    ],
)
def test_categorical_non_zero_inputs(use_quantized_grad):
    x = [1, 1, 1, 1, 1, 1, 2, 2]
    y = [1, 1, 1, 1, 1, 1, 0, 0]

    X_train = np.array(x).reshape(len(x), 1)
    y_train = np.array(y)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_train, y_train)

    params = {
        "objective": "regression",
        "metric": "auc",
        "verbose": -1,
        "boost_from_average": False,
        "min_data": 1,
        "num_leaves": 2,
        "learning_rate": 1,
        "min_data_in_bin": 1,
        "min_data_per_group": 1,
        "cat_smooth": 1,
        "cat_l2": 0,
        "max_cat_to_onehot": 1,
        "zero_as_missing": False,
        "categorical_column": 0,
        "use_quantized_grad": use_quantized_grad,
    }
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=1, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    pred = gbm.predict(X_train)
    np.testing.assert_allclose(pred, y)
    ret = roc_auc_score(y_train, pred)
    assert ret > 0.999
    assert evals_result["valid_0"]["auc"][-1] == pytest.approx(ret)


def test_multiclass():
    X, y = load_digits(n_class=10, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {"objective": "multiclass", "metric": "multi_logloss", "num_class": 10, "verbose": -1}
    lgb_train = lgb.Dataset(X_train, y_train, params=params)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=50, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    ret = multi_logloss(y_test, gbm.predict(X_test))
    assert ret < 0.16
    assert evals_result["valid_0"]["multi_logloss"][-1] == pytest.approx(ret)


def test_multiclass_rf():
    X, y = load_digits(n_class=10, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "boosting_type": "rf",
        "objective": "multiclass",
        "metric": "multi_logloss",
        "bagging_freq": 1,
        "bagging_fraction": 0.6,
        "feature_fraction": 0.6,
        "num_class": 10,
        "num_leaves": 50,
        "min_data": 1,
        "verbose": -1,
        "gpu_use_dp": True,
    }
    lgb_train = lgb.Dataset(X_train, y_train, params=params)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params)
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=50, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    ret = multi_logloss(y_test, gbm.predict(X_test))
    assert ret < 0.23
    assert evals_result["valid_0"]["multi_logloss"][-1] == pytest.approx(ret)


def test_multiclass_prediction_early_stopping():
    X, y = load_digits(n_class=10, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {"objective": "multiclass", "metric": "multi_logloss", "num_class": 10, "verbose": -1}
    lgb_train = lgb.Dataset(X_train, y_train, params=params)
    gbm = lgb.train(params, lgb_train, num_boost_round=50)

    pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
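    # prediction early-stopping (an inference speed-up) stops adding trees for a row once
    # the margin between the two largest class scores exceeds pred_early_stop_margin,
    # checked every pred_early_stop_freq trees, so a small margin trades accuracy for speed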
    ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))
    assert ret < 0.8
    assert ret > 0.6  # loss will be higher than when evaluating the full model

    pred_parameter["pred_early_stop_margin"] = 5.5
    ret = multi_logloss(y_test, gbm.predict(X_test, **pred_parameter))
    assert ret < 0.2


def test_multi_class_error():
    X, y = load_digits(n_class=10, return_X_y=True)
    params = {"objective": "multiclass", "num_classes": 10, "metric": "multi_error", "num_leaves": 4, "verbose": -1}
    lgb_data = lgb.Dataset(X, label=y)
    est = lgb.train(params, lgb_data, num_boost_round=10)
    predict_default = est.predict(X)
    results = {}
    est = lgb.train(
        dict(params, multi_error_top_k=1),
        lgb_data,
        num_boost_round=10,
        valid_sets=[lgb_data],
        callbacks=[lgb.record_evaluation(results)],
    )
    predict_1 = est.predict(X)
    # check that default gives same result as k = 1
    np.testing.assert_allclose(predict_1, predict_default)
    # check against independent calculation for k = 1
    err = top_k_error(y, predict_1, 1)
    assert results["training"]["multi_error"][-1] == pytest.approx(err)
    # check against independent calculation for k = 2
    results = {}
    est = lgb.train(
        dict(params, multi_error_top_k=2),
        lgb_data,
        num_boost_round=10,
        valid_sets=[lgb_data],
        callbacks=[lgb.record_evaluation(results)],
    )
    predict_2 = est.predict(X)
    err = top_k_error(y, predict_2, 2)
    assert results["training"]["multi_error@2"][-1] == pytest.approx(err)
    # check against independent calculation for k = 10
    results = {}
    est = lgb.train(
        dict(params, multi_error_top_k=10),
        lgb_data,
        num_boost_round=10,
        valid_sets=[lgb_data],
        callbacks=[lgb.record_evaluation(results)],
    )
    predict_3 = est.predict(X)
    err = top_k_error(y, predict_3, 10)
    assert results["training"]["multi_error@10"][-1] == pytest.approx(err)
    # check cases where predictions are equal
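    # (with identical scores the strict comparison in the top-k check never holds, so
    # top-1 error is 1, while top-2 with num_classes=2 always covers the label and gives 0)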
    X = np.array([[0, 0], [0, 0]])
    y = np.array([0, 1])
    lgb_data = lgb.Dataset(X, label=y)
    params["num_classes"] = 2
    results = {}
    lgb.train(params, lgb_data, num_boost_round=10, valid_sets=[lgb_data], callbacks=[lgb.record_evaluation(results)])
    assert results["training"]["multi_error"][-1] == pytest.approx(1)
    results = {}
    lgb.train(
        dict(params, multi_error_top_k=2),
        lgb_data,
        num_boost_round=10,
        valid_sets=[lgb_data],
        callbacks=[lgb.record_evaluation(results)],
    )
    assert results["training"]["multi_error@2"][-1] == pytest.approx(0)


@pytest.mark.skipif(
    getenv("TASK", "") == "cuda", reason="Skip due to differences in implementation details of CUDA version"
)
def test_auc_mu(rng):
    # should give same result as binary auc for 2 classes
    X, y = load_digits(n_class=10, return_X_y=True)
    y_new = np.zeros((len(y)))
    y_new[y != 0] = 1
    lgb_X = lgb.Dataset(X, label=y_new)
    params = {"objective": "multiclass", "metric": "auc_mu", "verbose": -1, "num_classes": 2, "seed": 0}
    results_auc_mu = {}
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_auc_mu)])
    params = {"objective": "binary", "metric": "auc", "verbose": -1, "seed": 0}
    results_auc = {}
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_auc)])
    np.testing.assert_allclose(results_auc_mu["training"]["auc_mu"], results_auc["training"]["auc"])
    # test the case where all predictions are equal
    lgb_X = lgb.Dataset(X[:10], label=y_new[:10])
    params = {
        "objective": "multiclass",
        "metric": "auc_mu",
        "verbose": -1,
        "num_classes": 2,
        "min_data_in_leaf": 20,
        "seed": 0,
    }
    results_auc_mu = {}
    lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_auc_mu)])
    assert results_auc_mu["training"]["auc_mu"][-1] == pytest.approx(0.5)
    # test that weighted data gives different auc_mu
    lgb_X = lgb.Dataset(X, label=y)
    lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.abs(rng.standard_normal(size=y.shape)))
    results_unweighted = {}
    results_weighted = {}
    params = dict(params, num_classes=10, num_leaves=5)
    lgb.train(
        params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_unweighted)]
    )
    lgb.train(
        params,
        lgb_X_weighted,
        num_boost_round=10,
        valid_sets=[lgb_X_weighted],
        callbacks=[lgb.record_evaluation(results_weighted)],
    )
    assert results_weighted["training"]["auc_mu"][-1] < 1
    assert results_unweighted["training"]["auc_mu"][-1] != results_weighted["training"]["auc_mu"][-1]
    # test that equal data weights give same auc_mu as unweighted data
    lgb_X_weighted = lgb.Dataset(X, label=y, weight=np.ones(y.shape) * 0.5)
    lgb.train(
        params,
        lgb_X_weighted,
        num_boost_round=10,
        valid_sets=[lgb_X_weighted],
        callbacks=[lgb.record_evaluation(results_weighted)],
    )
    assert results_unweighted["training"]["auc_mu"][-1] == pytest.approx(
        results_weighted["training"]["auc_mu"][-1], abs=1e-5
    )
    # should give 1 when accuracy = 1
    X = X[:10, :]
    y = y[:10]
    lgb_X = lgb.Dataset(X, label=y)
    params = {"objective": "multiclass", "metric": "auc_mu", "num_classes": 10, "min_data_in_leaf": 1, "verbose": -1}
    results = {}
    lgb.train(params, lgb_X, num_boost_round=100, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results)])
    assert results["training"]["auc_mu"][-1] == pytest.approx(1)
    # test loading class weights
    Xy = np.loadtxt(
        str(Path(__file__).absolute().parents[2] / "examples" / "multiclass_classification" / "multiclass.train")
    )
    y = Xy[:, 0]
    X = Xy[:, 1:]
    lgb_X = lgb.Dataset(X, label=y)
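    # auc_mu_weights is the flattened (row-major) 5x5 misclassification-cost matrix;
    # the zero diagonal assigns no cost to correct classifications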
    params = {
        "objective": "multiclass",
        "metric": "auc_mu",
        "auc_mu_weights": [0, 2, 2, 2, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0],
        "num_classes": 5,
        "verbose": -1,
        "seed": 0,
    }
    results_weight = {}
    lgb.train(params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_weight)])
    params["auc_mu_weights"] = []
    results_no_weight = {}
    lgb.train(
        params, lgb_X, num_boost_round=5, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(results_no_weight)]
    )
    assert results_weight["training"]["auc_mu"][-1] != results_no_weight["training"]["auc_mu"][-1]


def test_ranking_prediction_early_stopping():
    rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank"
    X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train"))
    q_train = np.loadtxt(str(rank_example_dir / "rank.train.query"))
    X_test, _ = load_svmlight_file(str(rank_example_dir / "rank.test"))
    params = {"objective": "rank_xendcg", "verbose": -1}
    lgb_train = lgb.Dataset(X_train, y_train, group=q_train, params=params)
    gbm = lgb.train(params, lgb_train, num_boost_round=50)

    pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
    ret_early = gbm.predict(X_test, **pred_parameter)

    pred_parameter["pred_early_stop_margin"] = 5.5
    ret_early_more_strict = gbm.predict(X_test, **pred_parameter)
    with pytest.raises(AssertionError):  # noqa: PT011
        np.testing.assert_allclose(ret_early, ret_early_more_strict)


# Simulates position bias for a given ranking dataset.
# The output dataset is identical to the input one except for the relevance labels.
# The new labels are generated according to an instance of a cascade user model:
# for each query, the user is simulated to be traversing the list of documents ranked by a baseline ranker
# (in our example it is simply the ordering by some feature correlated with relevance, e.g., 34)
# and clicks on a document (new_label=1) with some probability 'pclick' depending on its true relevance;
# at each position the user may stop the traversal with some probability pstop. For the non-clicked documents,
# new_label=0. Thus the generated new labels are biased towards the baseline ranker.
# The positions of the documents in the ranked lists produced by the baseline are returned.
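# A quick way to see the bias: the simulated user examines rank p with probability
# (1 - pstop) ** p, so the expected click rate of a document with relevance grade l
# shown at rank p is pclick(l) * (1 - pstop) ** p, and documents the baseline ranker
# places high are over-represented among the positive labels.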
def simulate_position_bias(file_dataset_in, file_query_in, file_dataset_out, baseline_feature):
    # a mapping of a document's true relevance (defined on a 5-grade scale) into the probability of clicking it
    def get_pclick(label):
        if label == 0:
            return 0.4
        elif label == 1:
            return 0.6
        elif label == 2:
            return 0.7
        elif label == 3:
            return 0.8
        else:
            return 0.9

    # an instantiation of a cascade model where the user stops with probability 0.2 after observing each document
    pstop = 0.2

    f_dataset_in = open(file_dataset_in, "r")
    f_dataset_out = open(file_dataset_out, "w")
    random.seed(10)
    positions_all = []
    for line in open(file_query_in):
        docs_num = int(line)
        lines = []
        index_values = []
        positions = [0] * docs_num
        for index in range(docs_num):
            features = f_dataset_in.readline().split()
            lines.append(features)
            val = 0.0
            for feature_val in features:
                feature_val_split = feature_val.split(":")
                if int(feature_val_split[0]) == baseline_feature:
                    val = float(feature_val_split[1])
            index_values.append([index, val])
        index_values.sort(key=lambda x: -x[1])
        stop = False
        for pos in range(docs_num):
            index = index_values[pos][0]
            new_label = 0
            if not stop:
                label = int(lines[index][0])
                pclick = get_pclick(label)
                if random.random() < pclick:
                    new_label = 1
                stop = random.random() < pstop
            lines[index][0] = str(new_label)
            positions[index] = pos
        for features in lines:
            f_dataset_out.write(" ".join(features) + "\n")
        positions_all.extend(positions)
    f_dataset_out.close()
    return positions_all


@pytest.mark.skipif(
    getenv("TASK", "") == "cuda", reason="Positions in learning to rank is not supported in CUDA version yet"
)
def test_ranking_with_position_information_with_file(tmp_path):
    rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank"
    params = {
        "objective": "lambdarank",
        "verbose": -1,
        "eval_at": [3],
        "metric": "ndcg",
        "bagging_freq": 1,
        "bagging_fraction": 0.9,
        "min_data_in_leaf": 50,
        "min_sum_hessian_in_leaf": 5.0,
    }

    # simulate position bias for the train dataset and put the train dataset with biased labels to temp directory
    positions = simulate_position_bias(
        str(rank_example_dir / "rank.train"),
        str(rank_example_dir / "rank.train.query"),
        str(tmp_path / "rank.train"),
        baseline_feature=34,
    )
    copyfile(str(rank_example_dir / "rank.train.query"), str(tmp_path / "rank.train.query"))
    copyfile(str(rank_example_dir / "rank.test"), str(tmp_path / "rank.test"))
    copyfile(str(rank_example_dir / "rank.test.query"), str(tmp_path / "rank.test.query"))

    lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params)
    lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))]
    gbm_baseline = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50)

    f_positions_out = open(str(tmp_path / "rank.train.position"), "w")
    for pos in positions:
        f_positions_out.write(str(pos) + "\n")
    f_positions_out.close()

    lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params)
    lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))]
    gbm_unbiased_with_file = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50)

    # the unbiased LambdaMART should outperform the plain LambdaMART on the dataset with position bias
    assert gbm_baseline.best_score["valid_0"]["ndcg@3"] + 0.03 <= gbm_unbiased_with_file.best_score["valid_0"]["ndcg@3"]

    # add extra row to position file
    with open(str(tmp_path / "rank.train.position"), "a") as file:
        file.write("pos_1000\n")
    lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params)
    lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))]
    with pytest.raises(lgb.basic.LightGBMError, match=r"Positions size \(3006\) doesn't match data size"):
        lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50)


@pytest.mark.skipif(
    getenv("TASK", "") == "cuda", reason="Positions in learning to rank is not supported in CUDA version yet"
)
def test_ranking_with_position_information_with_dataset_constructor(tmp_path):
    rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank"
    params = {
        "objective": "lambdarank",
        "verbose": -1,
        "eval_at": [3],
        "metric": "ndcg",
        "bagging_freq": 1,
        "bagging_fraction": 0.9,
        "min_data_in_leaf": 50,
        "min_sum_hessian_in_leaf": 5.0,
        "num_threads": 1,
        "deterministic": True,
        "seed": 0,
    }

    # simulate position bias for the train dataset and put the train dataset with biased labels to temp directory
    positions = simulate_position_bias(
        str(rank_example_dir / "rank.train"),
        str(rank_example_dir / "rank.train.query"),
        str(tmp_path / "rank.train"),
        baseline_feature=34,
    )
    copyfile(str(rank_example_dir / "rank.train.query"), str(tmp_path / "rank.train.query"))
    copyfile(str(rank_example_dir / "rank.test"), str(tmp_path / "rank.test"))
    copyfile(str(rank_example_dir / "rank.test.query"), str(tmp_path / "rank.test.query"))

    lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params)
    lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))]
    gbm_baseline = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50)

    positions = np.array(positions)

    # test setting positions through Dataset constructor with numpy array
    lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params, position=positions)
    lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))]
    gbm_unbiased = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50)

    # the unbiased LambdaMART should outperform the plain LambdaMART on the dataset with position bias
    assert gbm_baseline.best_score["valid_0"]["ndcg@3"] + 0.03 <= gbm_unbiased.best_score["valid_0"]["ndcg@3"]

    if PANDAS_INSTALLED:
        # test setting positions through Dataset constructor with pandas Series
        lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params, position=pd_Series(positions))
        lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))]
        gbm_unbiased_pandas_series = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50)
        assert (
            gbm_unbiased.best_score["valid_0"]["ndcg@3"] == gbm_unbiased_pandas_series.best_score["valid_0"]["ndcg@3"]
        )

    # test setting positions through set_position
    lgb_train = lgb.Dataset(str(tmp_path / "rank.train"), params=params)
    lgb_valid = [lgb_train.create_valid(str(tmp_path / "rank.test"))]
    lgb_train.set_position(positions)
    gbm_unbiased_set_position = lgb.train(params, lgb_train, valid_sets=lgb_valid, num_boost_round=50)
    assert gbm_unbiased.best_score["valid_0"]["ndcg@3"] == gbm_unbiased_set_position.best_score["valid_0"]["ndcg@3"]

    # test get_position works
    positions_from_get = lgb_train.get_position()
    np_assert_array_equal(positions_from_get, positions, strict=True)


def test_early_stopping():
    X, y = load_breast_cancer(return_X_y=True)
    params = {"objective": "binary", "metric": "binary_logloss", "verbose": -1}
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    valid_set_name = "valid_set"
    # no early stopping
    gbm = lgb.train(
        params,
        lgb_train,
        num_boost_round=10,
        valid_sets=lgb_eval,
        valid_names=valid_set_name,
        callbacks=[lgb.early_stopping(stopping_rounds=5)],
    )
    assert gbm.best_iteration == 10
    assert valid_set_name in gbm.best_score
    assert "binary_logloss" in gbm.best_score[valid_set_name]
    # early stopping occurs
    gbm = lgb.train(
        params,
        lgb_train,
        num_boost_round=40,
        valid_sets=lgb_eval,
        valid_names=valid_set_name,
        callbacks=[lgb.early_stopping(stopping_rounds=5)],
    )
    assert gbm.best_iteration <= 39
    assert valid_set_name in gbm.best_score
    assert "binary_logloss" in gbm.best_score[valid_set_name]


@pytest.mark.parametrize("use_valid", [True, False])
def test_early_stopping_ignores_training_set(use_valid):
    x = np.linspace(-1, 1, 100)
    X = x.reshape(-1, 1)
    y = x**2
    X_train, X_valid = X[:80], X[80:]
    y_train, y_valid = y[:80], y[80:]
    train_ds = lgb.Dataset(X_train, y_train)
    valid_ds = lgb.Dataset(X_valid, y_valid)
    valid_sets = [train_ds]
    valid_names = ["train"]
    if use_valid:
        valid_sets.append(valid_ds)
        valid_names.append("valid")
    eval_result = {}

    def train_fn():
        return lgb.train(
            {"num_leaves": 5},
            train_ds,
            num_boost_round=2,
            valid_sets=valid_sets,
            valid_names=valid_names,
            callbacks=[lgb.early_stopping(1), lgb.record_evaluation(eval_result)],
        )

    if use_valid:
        bst = train_fn()
        assert bst.best_iteration == 1
        assert eval_result["train"]["l2"][1] < eval_result["train"]["l2"][0]  # train improved
        assert eval_result["valid"]["l2"][1] > eval_result["valid"]["l2"][0]  # valid didn't
    else:
        with pytest.warns(UserWarning, match="Only training set found, disabling early stopping."):
            bst = train_fn()
        assert bst.current_iteration() == 2
        assert bst.best_iteration == 0


@pytest.mark.parametrize("first_metric_only", [True, False])
def test_early_stopping_via_global_params(first_metric_only):
    X, y = load_breast_cancer(return_X_y=True)
    num_trees = 5
    params = {
        "num_trees": num_trees,
        "objective": "binary",
        "metric": "None",
        "verbose": -1,
        "early_stopping_round": 2,
        "first_metric_only": first_metric_only,
    }
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    valid_set_name = "valid_set"
    gbm = lgb.train(
        params, lgb_train, feval=[decreasing_metric, constant_metric], valid_sets=lgb_eval, valid_names=valid_set_name
    )
    if first_metric_only:
        assert gbm.best_iteration == num_trees
    else:
        assert gbm.best_iteration == 1
    assert valid_set_name in gbm.best_score
    assert "decreasing_metric" in gbm.best_score[valid_set_name]
    assert "error" in gbm.best_score[valid_set_name]


@pytest.mark.parametrize("early_stopping_round", [-10, -1, 0, None, "None"])
def test_early_stopping_is_not_enabled_for_non_positive_stopping_rounds(early_stopping_round):
    X, y = load_breast_cancer(return_X_y=True)
    num_trees = 5
    params = {
        "num_trees": num_trees,
        "objective": "binary",
        "metric": "None",
        "verbose": -1,
        "early_stopping_round": early_stopping_round,
        "first_metric_only": True,
    }
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    valid_set_name = "valid_set"

    if early_stopping_round is None:
        gbm = lgb.train(
            params,
            lgb_train,
            feval=[constant_metric],
            valid_sets=lgb_eval,
            valid_names=valid_set_name,
        )
        assert "early_stopping_round" not in gbm.params
        assert gbm.num_trees() == num_trees
    elif early_stopping_round == "None":
        with pytest.raises(TypeError, match="early_stopping_round should be an integer. Got 'str'"):
            gbm = lgb.train(
                params,
                lgb_train,
                feval=[constant_metric],
                valid_sets=lgb_eval,
                valid_names=valid_set_name,
            )
    elif early_stopping_round <= 0:
        gbm = lgb.train(
            params,
            lgb_train,
            feval=[constant_metric],
            valid_sets=lgb_eval,
            valid_names=valid_set_name,
        )
        assert gbm.params["early_stopping_round"] == early_stopping_round
        assert gbm.num_trees() == num_trees


@pytest.mark.parametrize("first_only", [True, False])
@pytest.mark.parametrize("single_metric", [True, False])
@pytest.mark.parametrize("greater_is_better", [True, False])
def test_early_stopping_min_delta(first_only, single_metric, greater_is_better):
    if single_metric and not first_only:
        pytest.skip("first_metric_only doesn't affect single metric.")
    metric2min_delta = {
        "auc": 0.001,
        "binary_logloss": 0.01,
        "average_precision": 0.001,
        "mape": 0.01,
    }
    if single_metric:
        if greater_is_better:
            metric = "auc"
        else:
            metric = "binary_logloss"
    else:
        if first_only:
            if greater_is_better:
                metric = ["auc", "binary_logloss"]
            else:
                metric = ["binary_logloss", "auc"]
        else:
            if greater_is_better:
                metric = ["auc", "average_precision"]
            else:
                metric = ["binary_logloss", "mape"]

    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=0)
    train_ds = lgb.Dataset(X_train, y_train)
    valid_ds = lgb.Dataset(X_valid, y_valid, reference=train_ds)

    params = {"objective": "binary", "metric": metric, "verbose": -1}
    if isinstance(metric, str):
        min_delta = metric2min_delta[metric]
    elif first_only:
        min_delta = metric2min_delta[metric[0]]
    else:
        min_delta = [metric2min_delta[m] for m in metric]
    train_kwargs = {
        "params": params,
        "train_set": train_ds,
        "num_boost_round": 50,
        "valid_sets": [train_ds, valid_ds],
        "valid_names": ["training", "valid"],
    }

    # regular early stopping
    evals_result = {}
    train_kwargs["callbacks"] = [
        lgb.callback.early_stopping(10, first_only, verbose=False),
        lgb.record_evaluation(evals_result),
    ]
    bst = lgb.train(**train_kwargs)
    scores = np.vstack(list(evals_result["valid"].values())).T

    # positive min_delta
    delta_result = {}
    train_kwargs["callbacks"] = [
        lgb.callback.early_stopping(10, first_only, verbose=False, min_delta=min_delta),
        lgb.record_evaluation(delta_result),
    ]
    delta_bst = lgb.train(**train_kwargs)
    delta_scores = np.vstack(list(delta_result["valid"].values())).T

    if first_only:
        scores = scores[:, 0]
        delta_scores = delta_scores[:, 0]

    assert delta_bst.num_trees() < bst.num_trees()
    np.testing.assert_allclose(scores[: len(delta_scores)], delta_scores)
    last_score = delta_scores[-1]
    best_score = delta_scores[delta_bst.num_trees() - 1]
    if greater_is_better:
        assert np.less_equal(last_score, best_score + min_delta).any()
    else:
        assert np.greater_equal(last_score, best_score - min_delta).any()


@pytest.mark.parametrize("early_stopping_min_delta", [1e3, 0.0])
def test_early_stopping_min_delta_via_global_params(early_stopping_min_delta):
    X, y = load_breast_cancer(return_X_y=True)
    num_trees = 5
    params = {
        "num_trees": num_trees,
        "num_leaves": 5,
        "objective": "binary",
        "metric": "None",
        "verbose": -1,
        "early_stopping_round": 2,
        "early_stopping_min_delta": early_stopping_min_delta,
    }
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    gbm = lgb.train(params, lgb_train, feval=decreasing_metric, valid_sets=lgb_eval)
    if early_stopping_min_delta == 0:
        assert gbm.best_iteration == num_trees
    else:
        assert gbm.best_iteration == 1


def test_early_stopping_can_be_triggered_via_custom_callback():
    X, y = make_synthetic_regression()

    def _early_stop_after_seventh_iteration(env):
        if env.iteration == 6:
            exc = lgb.EarlyStopException(
                best_iteration=6, best_score=[("some_validation_set", "some_metric", 0.708, True)]
            )
            raise exc

    bst = lgb.train(
        params={"objective": "regression", "verbose": -1, "num_leaves": 2},
        train_set=lgb.Dataset(X, label=y),
        num_boost_round=23,
        callbacks=[_early_stop_after_seventh_iteration],
    )
    assert bst.num_trees() == 7
    assert bst.best_score["some_validation_set"]["some_metric"] == 0.708
    assert bst.best_iteration == 7
    assert bst.current_iteration() == 7


def test_continue_train(tmp_path):
    X, y = make_synthetic_regression()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {"objective": "regression", "metric": "l1", "verbose": -1}
    lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
    init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
    model_path = tmp_path / "model.txt"
    init_gbm.save_model(model_path)
    evals_result = {}
    gbm = lgb.train(
        params,
        lgb_train,
        num_boost_round=30,
        valid_sets=lgb_eval,
        # test custom eval metrics
        feval=(lambda p, d: ("custom_mae", mean_absolute_error(p, d.get_label()), False)),
        callbacks=[lgb.record_evaluation(evals_result)],
        init_model=model_path,
    )
    ret = mean_absolute_error(y_test, gbm.predict(X_test))
    assert ret < 13.6
    assert evals_result["valid_0"]["l1"][-1] == pytest.approx(ret)
    np.testing.assert_allclose(evals_result["valid_0"]["l1"], evals_result["valid_0"]["custom_mae"])


def test_continue_train_reused_dataset():
    X, y = make_synthetic_regression()
    params = {"objective": "regression", "verbose": -1}
    lgb_train = lgb.Dataset(X, y, free_raw_data=False)
    init_gbm = lgb.train(params, lgb_train, num_boost_round=5)
    init_gbm_2 = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm)
    init_gbm_3 = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm_2)
    gbm = lgb.train(params, lgb_train, num_boost_round=5, init_model=init_gbm_3)
    assert gbm.current_iteration() == 20


def test_continue_train_dart():
    X, y = make_synthetic_regression()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {"boosting_type": "dart", "objective": "regression", "metric": "l1", "verbose": -1}
    lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
    init_gbm = lgb.train(params, lgb_train, num_boost_round=50)
    evals_result = {}
    gbm = lgb.train(
        params,
        lgb_train,
        num_boost_round=50,
        valid_sets=lgb_eval,
        callbacks=[lgb.record_evaluation(evals_result)],
        init_model=init_gbm,
    )
    ret = mean_absolute_error(y_test, gbm.predict(X_test))
    assert ret < 13.6
    assert evals_result["valid_0"]["l1"][-1] == pytest.approx(ret)


def test_continue_train_multiclass():
    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {"objective": "multiclass", "metric": "multi_logloss", "num_class": 3, "verbose": -1}
    lgb_train = lgb.Dataset(X_train, y_train, params=params, free_raw_data=False)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, params=params, free_raw_data=False)
    init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
    evals_result = {}
    gbm = lgb.train(
        params,
        lgb_train,
        num_boost_round=30,
        valid_sets=lgb_eval,
        callbacks=[lgb.record_evaluation(evals_result)],
        init_model=init_gbm,
    )
    ret = multi_logloss(y_test, gbm.predict(X_test))
    assert ret < 0.1
    assert evals_result["valid_0"]["multi_logloss"][-1] == pytest.approx(ret)


def test_cv():
    X_train, y_train = make_synthetic_regression()
    params = {"verbose": -1}
    lgb_train = lgb.Dataset(X_train, y_train)
    # shuffle = False, override metric in params
    params_with_metric = {"metric": "l2", "verbose": -1}
    cv_res = lgb.cv(
        params_with_metric, lgb_train, num_boost_round=10, nfold=3, stratified=False, shuffle=False, metrics="l1"
    )
    assert "valid l1-mean" in cv_res
    assert "valid l2-mean" not in cv_res
    assert len(cv_res["valid l1-mean"]) == 10
    # shuffle = True, callbacks
    cv_res = lgb.cv(
        params,
        lgb_train,
        num_boost_round=10,
        nfold=3,
        stratified=False,
        shuffle=True,
        metrics="l1",
        callbacks=[lgb.reset_parameter(learning_rate=lambda i: 0.1 - 0.001 * i)],
    )
    assert "valid l1-mean" in cv_res
    assert len(cv_res["valid l1-mean"]) == 10
    # enable display training loss
    cv_res = lgb.cv(
        params_with_metric,
        lgb_train,
        num_boost_round=10,
        nfold=3,
        stratified=False,
        shuffle=False,
        metrics="l1",
        eval_train_metric=True,
    )
    assert "train l1-mean" in cv_res
    assert "valid l1-mean" in cv_res
    assert "train l2-mean" not in cv_res
    assert "valid l2-mean" not in cv_res
    assert len(cv_res["train l1-mean"]) == 10
    assert len(cv_res["valid l1-mean"]) == 10
    # self defined folds
    tss = TimeSeriesSplit(3)
    folds = tss.split(X_train)
    cv_res_gen = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=folds)
    cv_res_obj = lgb.cv(params_with_metric, lgb_train, num_boost_round=10, folds=tss)
    np.testing.assert_allclose(cv_res_gen["valid l2-mean"], cv_res_obj["valid l2-mean"])
    # LambdaRank
    rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank"
    X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train"))
    q_train = np.loadtxt(str(rank_example_dir / "rank.train.query"))
    params_lambdarank = {"objective": "lambdarank", "verbose": -1, "eval_at": 3}
    lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
    # ... with l2 metric
    cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3, metrics="l2")
    assert len(cv_res_lambda) == 2
    assert not np.isnan(cv_res_lambda["valid l2-mean"]).any()
    # ... with NDCG (default) metric
    cv_res_lambda = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, nfold=3)
    assert len(cv_res_lambda) == 2
    assert not np.isnan(cv_res_lambda["valid ndcg@3-mean"]).any()
    # self defined folds with lambdarank
    cv_res_lambda_obj = lgb.cv(params_lambdarank, lgb_train, num_boost_round=10, folds=GroupKFold(n_splits=3))
    np.testing.assert_allclose(cv_res_lambda["valid ndcg@3-mean"], cv_res_lambda_obj["valid ndcg@3-mean"])


def test_cv_works_with_init_model(tmp_path):
    X, y = make_synthetic_regression()
    params = {"objective": "regression", "verbose": -1}
    num_train_rounds = 2
    lgb_train = lgb.Dataset(X, y, free_raw_data=False)
    bst = lgb.train(params=params, train_set=lgb_train, num_boost_round=num_train_rounds)
    preds_raw = bst.predict(X, raw_score=True)
    model_path_txt = str(tmp_path / "lgb.model")
    bst.save_model(model_path_txt)

    num_cv_rounds = 5
    cv_kwargs = {
        "num_boost_round": num_cv_rounds,
        "nfold": 3,
        "stratified": False,
        "shuffle": False,
        "seed": 708,
        "return_cvbooster": True,
        "params": params,
    }

    # init_model from an in-memory Booster
    cv_res = lgb.cv(train_set=lgb_train, init_model=bst, **cv_kwargs)
    cv_bst_w_in_mem_init_model = cv_res["cvbooster"]
    assert cv_bst_w_in_mem_init_model.current_iteration() == [num_train_rounds + num_cv_rounds] * 3
    for booster in cv_bst_w_in_mem_init_model.boosters:
        np.testing.assert_allclose(preds_raw, booster.predict(X, raw_score=True, num_iteration=num_train_rounds))

    # init_model from a text file
    cv_res = lgb.cv(train_set=lgb_train, init_model=model_path_txt, **cv_kwargs)
    cv_bst_w_file_init_model = cv_res["cvbooster"]
    assert cv_bst_w_file_init_model.current_iteration() == [num_train_rounds + num_cv_rounds] * 3
    for booster in cv_bst_w_file_init_model.boosters:
        np.testing.assert_allclose(preds_raw, booster.predict(X, raw_score=True, num_iteration=num_train_rounds))

    # predictions should be identical
    for i in range(3):
        np.testing.assert_allclose(
            cv_bst_w_in_mem_init_model.boosters[i].predict(X), cv_bst_w_file_init_model.boosters[i].predict(X)
        )


def test_cvbooster():
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "objective": "binary",
        "metric": "binary_logloss",
        "verbose": -1,
    }
    nfold = 3
    lgb_train = lgb.Dataset(X_train, y_train)
    # with early stopping
    cv_res = lgb.cv(
        params,
        lgb_train,
        num_boost_round=25,
        nfold=nfold,
        callbacks=[lgb.early_stopping(stopping_rounds=5)],
        return_cvbooster=True,
    )
    assert "cvbooster" in cv_res
    cvb = cv_res["cvbooster"]
    assert isinstance(cvb, lgb.CVBooster)
    assert isinstance(cvb.boosters, list)
    assert len(cvb.boosters) == nfold
    assert all(isinstance(bst, lgb.Booster) for bst in cvb.boosters)
    assert cvb.best_iteration > 0
    # predict by each fold booster
    preds = cvb.predict(X_test)
    assert isinstance(preds, list)
    assert len(preds) == nfold
    # check that each booster predicted using the best iteration
    for fold_preds, bst in zip(preds, cvb.boosters):
        assert bst.best_iteration == cvb.best_iteration
        expected = bst.predict(X_test, num_iteration=cvb.best_iteration)
        np.testing.assert_allclose(fold_preds, expected)
    # fold averaging
    avg_pred = np.mean(preds, axis=0)
    ret = log_loss(y_test, avg_pred)
    assert ret < 0.13
    # without early stopping
    cv_res = lgb.cv(params, lgb_train, num_boost_round=20, nfold=3, return_cvbooster=True)
    cvb = cv_res["cvbooster"]
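    # best_iteration is -1 when early stopping was not used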
    assert cvb.best_iteration == -1
    preds = cvb.predict(X_test)
    avg_pred = np.mean(preds, axis=0)
    ret = log_loss(y_test, avg_pred)
    assert ret < 0.15


def test_cvbooster_save_load(tmp_path):
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "objective": "binary",
        "metric": "binary_logloss",
        "verbose": -1,
    }
    nfold = 3
    lgb_train = lgb.Dataset(X_train, y_train)

    cv_res = lgb.cv(
        params,
        lgb_train,
        num_boost_round=10,
        nfold=nfold,
        callbacks=[lgb.early_stopping(stopping_rounds=5)],
        return_cvbooster=True,
    )
    cvbooster = cv_res["cvbooster"]
    preds = cvbooster.predict(X_test)
    best_iteration = cvbooster.best_iteration

    model_path_txt = str(tmp_path / "lgb.model")

    cvbooster.save_model(model_path_txt)
    model_string = cvbooster.model_to_string()
    del cvbooster

    cvbooster_from_txt_file = lgb.CVBooster(model_file=model_path_txt)
    cvbooster_from_string = lgb.CVBooster().model_from_string(model_string)
    for cvbooster_loaded in [cvbooster_from_txt_file, cvbooster_from_string]:
        assert best_iteration == cvbooster_loaded.best_iteration
        np_assert_array_equal(preds, cvbooster_loaded.predict(X_test), strict=True)


@pytest.mark.parametrize("serializer", SERIALIZERS)
def test_cvbooster_picklable(serializer):
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, _ = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "objective": "binary",
        "metric": "binary_logloss",
        "verbose": -1,
    }
    nfold = 3
    lgb_train = lgb.Dataset(X_train, y_train)

    cv_res = lgb.cv(
        params,
        lgb_train,
        num_boost_round=10,
        nfold=nfold,
        callbacks=[lgb.early_stopping(stopping_rounds=5)],
        return_cvbooster=True,
    )
    cvbooster = cv_res["cvbooster"]
    preds = cvbooster.predict(X_test)
    best_iteration = cvbooster.best_iteration

    cvbooster_from_disk = pickle_and_unpickle_object(obj=cvbooster, serializer=serializer)
    del cvbooster

    assert best_iteration == cvbooster_from_disk.best_iteration

    preds_from_disk = cvbooster_from_disk.predict(X_test)
    np_assert_array_equal(preds, preds_from_disk, strict=True)


def test_feature_name():
    X_train, y_train = make_synthetic_regression()
    params = {"verbose": -1}
    feature_names = [f"f_{i}" for i in range(X_train.shape[-1])]
    lgb_train = lgb.Dataset(X_train, y_train, feature_name=feature_names)
    gbm = lgb.train(params, lgb_train, num_boost_round=5)
    assert feature_names == gbm.feature_name()
    # test feature_names with whitespaces
    feature_names_with_space = [f"f {i}" for i in range(X_train.shape[-1])]
    lgb_train.set_feature_name(feature_names_with_space)
    gbm = lgb.train(params, lgb_train, num_boost_round=5)
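    # LightGBM replaces whitespace in feature names with underscores, so the original
    # underscore-separated names are expected back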
    assert feature_names == gbm.feature_name()


def test_feature_name_with_non_ascii(rng, tmp_path):
    X_train = rng.normal(size=(100, 4))
    y_train = rng.normal(size=(100,))
    # feature names contain non-ASCII strings
    feature_names = ["F_零", "F_一", "F_二", "F_三"]
    params = {"verbose": -1}
    lgb_train = lgb.Dataset(X_train, y_train, feature_name=feature_names)

    gbm = lgb.train(params, lgb_train, num_boost_round=5)
    assert feature_names == gbm.feature_name()
    model_path_txt = str(tmp_path / "lgb.model")
    gbm.save_model(model_path_txt)

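    # reloading from the saved file must round-trip the UTF-8 feature names intact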
    gbm2 = lgb.Booster(model_file=model_path_txt)
    assert feature_names == gbm2.feature_name()


def test_parameters_are_loaded_from_model_file(tmp_path, capsys, rng):
    X = np.hstack(
        [
            rng.uniform(size=(100, 1)),
            rng.integers(low=0, high=5, size=(100, 2)),
        ]
    )
    y = rng.uniform(size=(100,))
    ds = lgb.Dataset(X, y, categorical_feature=[1, 2])
    params = {
        "bagging_fraction": 0.8,
        "bagging_freq": 2,
        "boosting": "rf",
        "feature_contri": [0.5, 0.5, 0.5],
        "feature_fraction": 0.7,
        "boost_from_average": False,
        "interaction_constraints": [[0, 1], [0]],
        "metric": ["l2", "rmse"],
        "num_leaves": 5,
        "num_threads": 1,
        "verbosity": 0,
    }
    model_file = tmp_path / "model.txt"
    orig_bst = lgb.train(params, ds, num_boost_round=1)
    orig_bst.save_model(model_file)
    with model_file.open("rt") as f:
        model_contents = f.readlines()
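    # inject an unrecognized parameter into the model file's "parameters:" section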
    params_start = model_contents.index("parameters:\n")
    model_contents.insert(params_start + 1, "[max_conflict_rate: 0]\n")
    with model_file.open("wt") as f:
        f.writelines(model_contents)
    bst = lgb.Booster(model_file=model_file)
    expected_msg = "[LightGBM] [Warning] Ignoring unrecognized parameter 'max_conflict_rate' found in model string."
    stdout = capsys.readouterr().out
    assert expected_msg in stdout
    set_params = {k: bst.params[k] for k in params.keys()}
    assert set_params == params
    assert bst.params["categorical_feature"] == [1, 2]

    # check that passing parameters to the constructor raises a warning and ignores them
    with pytest.warns(UserWarning, match="Ignoring params argument, using parameters from model file."):
        bst2 = lgb.Booster(params={"num_leaves": 7}, model_file=model_file)
    assert bst.params == bst2.params

    # check inference isn't affected by unknown parameter
    orig_preds = orig_bst.predict(X)
    preds = bst.predict(X)
    np.testing.assert_allclose(preds, orig_preds)


def test_string_serialized_params_retrieval(rng):
    # Random train data
    train_x = rng.random((500, 3))
    train_y = rng.integers(0, 2, 500)  # high is exclusive, so this yields 0/1 labels
    train_data = lgb.Dataset(train_x, train_y)

    # Parameters
    params = {
        "boosting": "gbdt",
        "deterministic": True,
        "feature_contri": [0.5] * train_x.shape[1],
        "interaction_constraints": [[0, 1], [0]],
        "objective": "binary",
        "metric": ["auc"],
        "num_leaves": 7,
        "learning_rate": 0.05,
        "feature_fraction": 0.9,
        "bagging_fraction": 0.8,
        "bagging_freq": 5,
        "verbosity": -100,
    }

    # train a model and serialize it to a string in memory
    model = lgb.train(params, train_data, num_boost_round=2)
    model_serialized = model.model_to_string()

    # load a new model with the string
    with pytest.warns(UserWarning, match="Ignoring params argument, using parameters from model string."):
        new_model = lgb.Booster(params={"num_leaves": 32}, model_str=model_serialized)

    assert new_model.params["boosting"] == "gbdt"
    assert new_model.params["deterministic"] is True
    assert new_model.params["feature_contri"] == [0.5] * train_x.shape[1]
    assert new_model.params["interaction_constraints"] == [[0, 1], [0]]
    assert new_model.params["objective"] == "binary"
    assert new_model.params["metric"] == ["auc"]
    assert new_model.params["num_leaves"] == 7
    assert new_model.params["learning_rate"] == 0.05
    assert new_model.params["feature_fraction"] == 0.9
    assert new_model.params["bagging_fraction"] == 0.8
    assert new_model.params["bagging_freq"] == 5
    assert new_model.params["verbosity"] == -100


def test_save_load_copy_pickle(tmp_path):
    def train_and_predict(init_model=None, return_model=False):
        X, y = make_synthetic_regression()
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {"objective": "regression", "metric": "l2", "verbose": -1}
        lgb_train = lgb.Dataset(X_train, y_train)
        gbm_template = lgb.train(params, lgb_train, num_boost_round=10, init_model=init_model)
        return gbm_template if return_model else mean_squared_error(y_test, gbm_template.predict(X_test))

    gbm = train_and_predict(return_model=True)
    ret_origin = train_and_predict(init_model=gbm)
    other_ret = []
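    # every way of supplying init_model below (file path, loaded Booster, copies,
    # pickles) should continue training identically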
    model_path_txt = str(tmp_path / "lgb.model")
    gbm.save_model(model_path_txt)
    with open(model_path_txt) as f:  # check all params are logged into model file correctly
        assert f.read().find("[num_iterations: 10]") != -1
    other_ret.append(train_and_predict(init_model=model_path_txt))
    gbm_load = lgb.Booster(model_file=model_path_txt)
    other_ret.append(train_and_predict(init_model=gbm_load))
    other_ret.append(train_and_predict(init_model=copy.copy(gbm)))
    other_ret.append(train_and_predict(init_model=copy.deepcopy(gbm)))
    model_path_pkl = str(tmp_path / "lgb.pkl")
    with open(model_path_pkl, "wb") as f:
        pickle.dump(gbm, f)
    with open(model_path_pkl, "rb") as f:
        gbm_pickle = pickle.load(f)
    other_ret.append(train_and_predict(init_model=gbm_pickle))
    gbm_pickles = pickle.loads(pickle.dumps(gbm))
    other_ret.append(train_and_predict(init_model=gbm_pickles))
    for ret in other_ret:
        assert ret_origin == pytest.approx(ret)


def test_all_expected_params_are_written_out_to_model_text(tmp_path):
    X, y = make_synthetic_regression()
    params = {
        "objective": "mape",
        "metric": ["l2", "mae"],
        "seed": 708,
        "data_sample_strategy": "bagging",
        "sub_row": 0.8234,
        "verbose": -1,
    }
    dtrain = lgb.Dataset(data=X, label=y)
    gbm = lgb.train(params=params, train_set=dtrain, num_boost_round=3)

    model_txt_from_memory = gbm.model_to_string()
    model_file = tmp_path / "out.model"
    gbm.save_model(filename=model_file)
    with open(model_file, "r") as f:
        model_txt_from_file = f.read()

    assert model_txt_from_memory == model_txt_from_file

    # entries whose values should reflect params passed to lgb.train()
    non_default_param_entries = [
        "[objective: mape]",
        # 'l1' was passed in with alias 'mae'
        "[metric: l2,l1]",
        "[data_sample_strategy: bagging]",
        "[seed: 708]",
        # NOTE: this was passed in with alias 'sub_row'
        "[bagging_fraction: 0.8234]",
        "[num_iterations: 3]",
    ]

    # entries with default values of params
    default_param_entries = [
        "[boosting: gbdt]",
        "[tree_learner: serial]",
        "[data: ]",
        "[valid: ]",
        "[learning_rate: 0.1]",
        "[num_leaves: 31]",
        "[num_threads: 0]",
        "[deterministic: 0]",
        "[histogram_pool_size: -1]",
        "[max_depth: -1]",
        "[min_data_in_leaf: 20]",
        "[min_sum_hessian_in_leaf: 0.001]",
        "[pos_bagging_fraction: 1]",
        "[neg_bagging_fraction: 1]",
        "[bagging_freq: 0]",
        "[bagging_seed: 15415]",
        "[feature_fraction: 1]",
        "[feature_fraction_bynode: 1]",
        "[feature_fraction_seed: 32671]",
        "[extra_trees: 0]",
        "[extra_seed: 6642]",
        "[early_stopping_round: 0]",
        "[early_stopping_min_delta: 0]",
        "[first_metric_only: 0]",
        "[max_delta_step: 0]",
        "[lambda_l1: 0]",
        "[lambda_l2: 0]",
        "[linear_lambda: 0]",
        "[min_gain_to_split: 0]",
        "[drop_rate: 0.1]",
        "[max_drop: 50]",
        "[skip_drop: 0.5]",
        "[xgboost_dart_mode: 0]",
        "[uniform_drop: 0]",
        "[drop_seed: 20623]",
        "[top_rate: 0.2]",
        "[other_rate: 0.1]",
        "[min_data_per_group: 100]",
        "[max_cat_threshold: 32]",
        "[cat_l2: 10]",
        "[cat_smooth: 10]",
        "[max_cat_to_onehot: 4]",
        "[top_k: 20]",
        "[monotone_constraints: ]",
        "[monotone_constraints_method: basic]",
        "[monotone_penalty: 0]",
        "[feature_contri: ]",
        "[forcedsplits_filename: ]",
        "[refit_decay_rate: 0.9]",
        "[cegb_tradeoff: 1]",
        "[cegb_penalty_split: 0]",
        "[cegb_penalty_feature_lazy: ]",
        "[cegb_penalty_feature_coupled: ]",
        "[path_smooth: 0]",
        "[interaction_constraints: ]",
        "[verbosity: -1]",
        "[saved_feature_importance_type: 0]",
        "[use_quantized_grad: 0]",
        "[num_grad_quant_bins: 4]",
        "[quant_train_renew_leaf: 0]",
        "[stochastic_rounding: 1]",
        "[linear_tree: 0]",
        "[max_bin: 255]",
        "[max_bin_by_feature: ]",
        "[min_data_in_bin: 3]",
        "[bin_construct_sample_cnt: 200000]",
        "[data_random_seed: 2350]",
        "[is_enable_sparse: 1]",
        "[enable_bundle: 1]",
        "[use_missing: 1]",
        "[zero_as_missing: 0]",
        "[feature_pre_filter: 1]",
        "[pre_partition: 0]",
        "[two_round: 0]",
        "[header: 0]",
        "[label_column: ]",
        "[weight_column: ]",
        "[group_column: ]",
        "[ignore_column: ]",
        "[categorical_feature: ]",
        "[forcedbins_filename: ]",
        "[precise_float_parser: 0]",
        "[parser_config_file: ]",
        "[objective_seed: 4309]",
        "[num_class: 1]",
        "[is_unbalance: 0]",
        "[scale_pos_weight: 1]",
        "[sigmoid: 1]",
        "[boost_from_average: 1]",
        "[reg_sqrt: 0]",
        "[alpha: 0.9]",
        "[fair_c: 1]",
        "[poisson_max_delta_step: 0.7]",
        "[tweedie_variance_power: 1.5]",
        "[lambdarank_truncation_level: 30]",
        "[lambdarank_norm: 1]",
        "[label_gain: ]",
        "[lambdarank_position_bias_regularization: 0]",
        "[eval_at: ]",
        "[multi_error_top_k: 1]",
        "[auc_mu_weights: ]",
        "[num_machines: 1]",
        "[local_listen_port: 12400]",
        "[time_out: 120]",
        "[machine_list_filename: ]",
        "[machines: ]",
        "[gpu_platform_id: -1]",
        "[gpu_device_id: -1]",
        "[num_gpu: 1]",
    ]
    all_param_entries = non_default_param_entries + default_param_entries

    # add device-specific entries
    #
    # passed-in force_col_wise / force_row_wise parameters are ignored on CUDA and GPU builds...
    # https://github.com/microsoft/LightGBM/blob/1d7ee63686272bceffd522284127573b511df6be/src/io/config.cpp#L375-L377
    if getenv("TASK", "") == "cuda":
        device_entries = ["[force_col_wise: 0]", "[force_row_wise: 1]", "[device_type: cuda]", "[gpu_use_dp: 1]"]
    elif getenv("TASK", "") == "gpu":
        device_entries = ["[force_col_wise: 1]", "[force_row_wise: 0]", "[device_type: gpu]", "[gpu_use_dp: 0]"]
    else:
        device_entries = ["[force_col_wise: 0]", "[force_row_wise: 0]", "[device_type: cpu]", "[gpu_use_dp: 0]"]

    all_param_entries += device_entries

    # check that model text has all expected param entries
    for param_str in all_param_entries:
        assert param_str in model_txt_from_file
        assert param_str in model_txt_from_memory

    # since Booster.model_to_string() is used when pickling, check that parameters all
    # roundtrip pickling successfully too
    gbm_pkl = pickle_and_unpickle_object(gbm, serializer="joblib")
    model_txt_from_memory = gbm_pkl.model_to_string()
    model_file = tmp_path / "out-pkl.model"
    gbm_pkl.save_model(filename=model_file)
    with open(model_file, "r") as f:
        model_txt_from_file = f.read()

    for param_str in all_param_entries:
        assert param_str in model_txt_from_file
        assert param_str in model_txt_from_memory


# a fixed seed is needed: with some random draws there is no difference in how the
# columns are treated (categorical or not), and the inequality checks below would fail
def test_pandas_categorical(rng_fixed_seed, tmp_path):
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame(
        {
            "A": rng_fixed_seed.permutation(["a", "b", "c", "d"] * 75),  # str
            "B": rng_fixed_seed.permutation([1, 2, 3] * 100),  # int
            "C": rng_fixed_seed.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),  # float
            "D": rng_fixed_seed.permutation([True, False] * 150),  # bool
            "E": pd.Categorical(rng_fixed_seed.permutation(["z", "y", "x", "w", "v"] * 60), ordered=True),
        }
    )  # str and ordered categorical
    y = rng_fixed_seed.permutation([0, 1] * 150)
    X_test = pd.DataFrame(
        {
            "A": rng_fixed_seed.permutation(["a", "b", "e"] * 20),  # unseen category
            "B": rng_fixed_seed.permutation([1, 3] * 30),
            "C": rng_fixed_seed.permutation([0.1, -0.1, 0.2, 0.2] * 15),
            "D": rng_fixed_seed.permutation([True, False] * 30),
            "E": pd.Categorical(rng_fixed_seed.permutation(["z", "y"] * 30), ordered=True),
        }
    )
    cat_cols_actual = ["A", "B", "C", "D"]
    cat_cols_to_store = cat_cols_actual + ["E"]
    X[cat_cols_actual] = X[cat_cols_actual].astype("category")
    X_test[cat_cols_actual] = X_test[cat_cols_actual].astype("category")
    cat_values = [X[col].cat.categories.tolist() for col in cat_cols_to_store]
    params = {"objective": "binary", "metric": "binary_logloss", "verbose": -1}
    lgb_train = lgb.Dataset(X, y)
    gbm0 = lgb.train(params, lgb_train, num_boost_round=10)
    pred0 = gbm0.predict(X_test)
    assert lgb_train.categorical_feature == "auto"
    lgb_train = lgb.Dataset(
        X, pd.DataFrame(y), categorical_feature=[0]
    )  # also test that label can be one-column pd.DataFrame
    gbm1 = lgb.train(params, lgb_train, num_boost_round=10)
    pred1 = gbm1.predict(X_test)
    assert lgb_train.categorical_feature == [0]
    lgb_train = lgb.Dataset(X, pd.Series(y), categorical_feature=["A"])  # also test that label can be pd.Series
    gbm2 = lgb.train(params, lgb_train, num_boost_round=10)
    pred2 = gbm2.predict(X_test)
    assert lgb_train.categorical_feature == ["A"]
    lgb_train = lgb.Dataset(X, y, categorical_feature=["A", "B", "C", "D"])
    gbm3 = lgb.train(params, lgb_train, num_boost_round=10)
    pred3 = gbm3.predict(X_test)
    assert lgb_train.categorical_feature == ["A", "B", "C", "D"]
    categorical_model_path = tmp_path / "categorical.model"
    gbm3.save_model(categorical_model_path)
    gbm4 = lgb.Booster(model_file=categorical_model_path)
    pred4 = gbm4.predict(X_test)
    model_str = gbm4.model_to_string()
    gbm4.model_from_string(model_str)
    pred5 = gbm4.predict(X_test)
    gbm5 = lgb.Booster(model_str=model_str)
    pred6 = gbm5.predict(X_test)
    lgb_train = lgb.Dataset(X, y, categorical_feature=["A", "B", "C", "D", "E"])
    gbm6 = lgb.train(params, lgb_train, num_boost_round=10)
    pred7 = gbm6.predict(X_test)
    assert lgb_train.categorical_feature == ["A", "B", "C", "D", "E"]
    lgb_train = lgb.Dataset(X, y, categorical_feature=[])
    gbm7 = lgb.train(params, lgb_train, num_boost_round=10)
    pred8 = gbm7.predict(X_test)
    assert lgb_train.categorical_feature == []
    with pytest.raises(AssertionError):  # noqa: PT011
        np.testing.assert_allclose(pred0, pred1)
    with pytest.raises(AssertionError):  # noqa: PT011
        np.testing.assert_allclose(pred0, pred2)
    np.testing.assert_allclose(pred1, pred2)
    np.testing.assert_allclose(pred0, pred3)
    np.testing.assert_allclose(pred0, pred4)
    np.testing.assert_allclose(pred0, pred5)
    np.testing.assert_allclose(pred0, pred6)
    with pytest.raises(AssertionError):  # noqa: PT011
        np.testing.assert_allclose(pred0, pred7)  # ordered cat features aren't treated as cat features by default
    with pytest.raises(AssertionError):  # noqa: PT011
        np.testing.assert_allclose(pred0, pred8)
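    # every booster, regardless of how it was created or reloaded, should carry the
    # same stored category levels for the pandas categorical columns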
    assert gbm0.pandas_categorical == cat_values
    assert gbm1.pandas_categorical == cat_values
    assert gbm2.pandas_categorical == cat_values
    assert gbm3.pandas_categorical == cat_values
    assert gbm4.pandas_categorical == cat_values
    assert gbm5.pandas_categorical == cat_values
    assert gbm6.pandas_categorical == cat_values
    assert gbm7.pandas_categorical == cat_values


def test_pandas_sparse(rng):
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame(
        {
            "A": pd.arrays.SparseArray(rng.permutation([0, 1, 2] * 100)),
            "B": pd.arrays.SparseArray(rng.permutation([0.0, 0.1, 0.2, -0.1, 0.2] * 60)),
            "C": pd.arrays.SparseArray(rng.permutation([True, False] * 150)),
        }
    )
    y = pd.Series(pd.arrays.SparseArray(rng.permutation([0, 1] * 150)))
1872
1873
    X_test = pd.DataFrame(
        {
1874
1875
1876
            "A": pd.arrays.SparseArray(rng.permutation([0, 2] * 30)),
            "B": pd.arrays.SparseArray(rng.permutation([0.0, 0.1, 0.2, -0.1] * 15)),
            "C": pd.arrays.SparseArray(rng.permutation([True, False] * 30)),
1877
1878
        }
    )
1879
    for dtype in pd.concat([X.dtypes, X_test.dtypes, pd.Series(y.dtypes)]):
1880
        assert isinstance(dtype, pd.SparseDtype)
1881
    params = {"objective": "binary", "verbose": -1}
1882
1883
1884
    lgb_train = lgb.Dataset(X, y)
    gbm = lgb.train(params, lgb_train, num_boost_round=10)
    pred_sparse = gbm.predict(X_test, raw_score=True)
1885
    if hasattr(X_test, "sparse"):
1886
1887
1888
1889
1890
1891
        pred_dense = gbm.predict(X_test.sparse.to_dense(), raw_score=True)
    else:
        pred_dense = gbm.predict(X_test.to_dense(), raw_score=True)
    np.testing.assert_allclose(pred_sparse, pred_dense)


def test_reference_chain(rng):
    X = rng.normal(size=(100, 2))
    y = rng.normal(size=(100,))
    tmp_dat = lgb.Dataset(X, y)
    # take subsets and train
    tmp_dat_train = tmp_dat.subset(np.arange(80))
    tmp_dat_val = tmp_dat.subset(np.arange(80, 100)).subset(np.arange(18))
    params = {"objective": "regression_l2", "metric": "rmse"}
    evals_result = {}
    lgb.train(
        params,
        tmp_dat_train,
        num_boost_round=20,
        valid_sets=[tmp_dat_train, tmp_dat_val],
        callbacks=[lgb.record_evaluation(evals_result)],
    )
    assert len(evals_result["training"]["rmse"]) == 20
    assert len(evals_result["valid_1"]["rmse"]) == 20


def test_contribs():
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "objective": "binary",
        "metric": "binary_logloss",
        "verbose": -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    gbm = lgb.train(params, lgb_train, num_boost_round=20)

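    # pred_contrib=True returns one column per feature plus a bias column; each row's
    # sum should reproduce the raw score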
    assert (
        np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(gbm.predict(X_test, pred_contrib=True), axis=1))
        < 1e-4
    )


def test_contribs_sparse():
    n_features = 20
    n_samples = 100
    # generate CSR sparse dataset
    X, y = make_multilabel_classification(
        n_samples=n_samples, sparse=True, n_features=n_features, n_classes=1, n_labels=2
    )
    y = y.flatten()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "objective": "binary",
        "verbose": -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    gbm = lgb.train(params, lgb_train, num_boost_round=20)
    contribs_csr = gbm.predict(X_test, pred_contrib=True)
    assert isspmatrix_csr(contribs_csr)
    # convert data to dense and get back same contribs
    contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True)
    # validate the values are the same
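    # (aarch64 needs a looser tolerance because of platform floating-point differences)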
    if platform.machine() == "aarch64":
        np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csr.toarray(), contribs_dense)
    assert np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(contribs_dense, axis=1)) < 1e-4
    # validate using CSC matrix
    X_test_csc = X_test.tocsc()
    contribs_csc = gbm.predict(X_test_csc, pred_contrib=True)
    assert isspmatrix_csc(contribs_csc)
    # validate the values are the same
    if platform.machine() == "aarch64":
        np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csc.toarray(), contribs_dense)


def test_contribs_sparse_multiclass():
    n_features = 20
    n_samples = 100
    n_labels = 4
    # generate CSR sparse dataset
    X, y = make_multilabel_classification(
        n_samples=n_samples, sparse=True, n_features=n_features, n_classes=1, n_labels=n_labels
    )
    y = y.flatten()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "objective": "multiclass",
        "num_class": n_labels,
        "verbose": -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    gbm = lgb.train(params, lgb_train, num_boost_round=20)
    contribs_csr = gbm.predict(X_test, pred_contrib=True)
    assert isinstance(contribs_csr, list)
    for perclass_contribs_csr in contribs_csr:
        assert isspmatrix_csr(perclass_contribs_csr)
    # convert data to dense and get back same contribs
    contribs_dense = gbm.predict(X_test.toarray(), pred_contrib=True)
    # validate the values are the same
    contribs_csr_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csr]), 0, 1)
    contribs_csr_arr_re = contribs_csr_array.reshape(
        (contribs_csr_array.shape[0], contribs_csr_array.shape[1] * contribs_csr_array.shape[2])
    )
    if platform.machine() == "aarch64":
        np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csr_arr_re, contribs_dense)
    contribs_dense_re = contribs_dense.reshape(contribs_csr_array.shape)
    assert np.linalg.norm(gbm.predict(X_test, raw_score=True) - np.sum(contribs_dense_re, axis=2)) < 1e-4
    # validate using CSC matrix
    X_test_csc = X_test.tocsc()
    contribs_csc = gbm.predict(X_test_csc, pred_contrib=True)
    assert isinstance(contribs_csc, list)
    for perclass_contribs_csc in contribs_csc:
        assert isspmatrix_csc(perclass_contribs_csc)
    # validate the values are the same
    contribs_csc_array = np.swapaxes(np.array([sparse_array.toarray() for sparse_array in contribs_csc]), 0, 1)
    contribs_csc_array = contribs_csc_array.reshape(
        (contribs_csc_array.shape[0], contribs_csc_array.shape[1] * contribs_csc_array.shape[2])
    )
    if platform.machine() == "aarch64":
        np.testing.assert_allclose(contribs_csc_array, contribs_dense, rtol=1, atol=1e-12)
    else:
        np.testing.assert_allclose(contribs_csc_array, contribs_dense)


@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason="not enough RAM")
def test_int32_max_sparse_contribs(rng):
    params = {"objective": "binary"}
    train_features = rng.uniform(size=(100, 1000))
    train_targets = [0] * 50 + [1] * 50
    lgb_train = lgb.Dataset(train_features, train_targets)
    gbm = lgb.train(params, lgb_train, num_boost_round=2)
    csr_input_shape = (3000000, 1000)
    test_features = csr_matrix(csr_input_shape)
    for i in range(0, csr_input_shape[0], csr_input_shape[0] // 6):
        for j in range(0, 1000, 100):
            test_features[i, j] = random.random()
    y_pred_csr = gbm.predict(test_features, pred_contrib=True)
    # Note there is an extra column added to the output for the expected value
    csr_output_shape = (csr_input_shape[0], csr_input_shape[1] + 1)
    assert y_pred_csr.shape == csr_output_shape
    y_pred_csc = gbm.predict(test_features.tocsc(), pred_contrib=True)
    # Note output CSC shape should be same as CSR output shape
    assert y_pred_csc.shape == csr_output_shape


def test_sliced_data(rng):
    def train_and_get_predictions(features, labels):
        dataset = lgb.Dataset(features, label=labels)
        lgb_params = {
            "application": "binary",
            "verbose": -1,
            "min_data": 5,
        }
        gbm = lgb.train(
            params=lgb_params,
            train_set=dataset,
            num_boost_round=10,
        )
        return gbm.predict(features)

    num_samples = 100
    features = rng.uniform(size=(num_samples, 5))
    positive_samples = int(num_samples * 0.25)
    labels = np.append(
        np.ones(positive_samples, dtype=np.float32), np.zeros(num_samples - positive_samples, dtype=np.float32)
    )
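    # the slices below are views into larger arrays; training on a view must match
    # training on the original contiguous data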
    # test sliced labels
    origin_pred = train_and_get_predictions(features, labels)
    stacked_labels = np.column_stack((labels, np.ones(num_samples, dtype=np.float32)))
    sliced_labels = stacked_labels[:, 0]
    sliced_pred = train_and_get_predictions(features, sliced_labels)
    np.testing.assert_allclose(origin_pred, sliced_pred)
    # append some columns
    stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), features))
    stacked_features = np.column_stack((np.ones(num_samples, dtype=np.float32), stacked_features))
    stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))
    stacked_features = np.column_stack((stacked_features, np.ones(num_samples, dtype=np.float32)))
    # append some rows
    stacked_features = np.concatenate((np.ones(9, dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)
    stacked_features = np.concatenate((np.ones(9, dtype=np.float32).reshape((1, 9)), stacked_features), axis=0)
    stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)
    stacked_features = np.concatenate((stacked_features, np.ones(9, dtype=np.float32).reshape((1, 9))), axis=0)
    # test sliced 2d matrix
    sliced_features = stacked_features[2:102, 2:7]
    assert np.all(sliced_features == features)
    sliced_pred = train_and_get_predictions(sliced_features, sliced_labels)
    np.testing.assert_allclose(origin_pred, sliced_pred)
    # test sliced CSR
    stacked_csr = csr_matrix(stacked_features)
    sliced_csr = stacked_csr[2:102, 2:7]
    assert np.all(sliced_csr == features)
    sliced_pred = train_and_get_predictions(sliced_csr, sliced_labels)
    np.testing.assert_allclose(origin_pred, sliced_pred)


def test_init_with_subset(tmp_path, rng):
    data = rng.uniform(size=(50, 2))
    y = [1] * 25 + [0] * 25
    lgb_train = lgb.Dataset(data, y, free_raw_data=False)
    subset_index_1 = rng.choice(a=np.arange(50), size=30, replace=False)
    subset_data_1 = lgb_train.subset(subset_index_1)
    subset_index_2 = rng.choice(a=np.arange(50), size=20, replace=False)
    subset_data_2 = lgb_train.subset(subset_index_2)
    params = {"objective": "binary", "verbose": -1}
    init_gbm = lgb.train(params=params, train_set=subset_data_1, num_boost_round=10, keep_training_booster=True)
    lgb.train(params=params, train_set=subset_data_2, num_boost_round=10, init_model=init_gbm)
    assert lgb_train.get_data().shape[0] == 50
    assert subset_data_1.get_data().shape[0] == 30
    assert subset_data_2.get_data().shape[0] == 20
    lgb_train_data = str(tmp_path / "lgb_train_data.bin")
    lgb_train.save_binary(lgb_train_data)
    lgb_train_from_file = lgb.Dataset(lgb_train_data, free_raw_data=False)
    subset_data_3 = lgb_train_from_file.subset(subset_index_1)
    subset_data_4 = lgb_train_from_file.subset(subset_index_2)
    init_gbm_2 = lgb.train(params=params, train_set=subset_data_3, num_boost_round=10, keep_training_booster=True)
    with np.testing.assert_raises_regex(lgb.basic.LightGBMError, "Unknown format of training data"):
        lgb.train(params=params, train_set=subset_data_4, num_boost_round=10, init_model=init_gbm_2)
    assert lgb_train_from_file.get_data() == lgb_train_data
    assert subset_data_3.get_data() == lgb_train_data
    assert subset_data_4.get_data() == lgb_train_data


def test_training_on_constructed_subset_without_params(rng):
    X = rng.uniform(size=(100, 10))
    y = rng.uniform(size=(100,))
    lgb_data = lgb.Dataset(X, y)
    subset_indices = [1, 2, 3, 4]
    subset = lgb_data.subset(subset_indices).construct()
    bst = lgb.train({}, subset, num_boost_round=1)
    assert subset.get_params() == {}
    assert subset.num_data() == len(subset_indices)
    assert bst.current_iteration() == 1


def generate_trainset_for_monotone_constraints_tests(x3_to_category=True):
    number_of_dpoints = 3000
    rng = np.random.default_rng()
    x1_positively_correlated_with_y = rng.uniform(size=number_of_dpoints)
    x2_negatively_correlated_with_y = rng.uniform(size=number_of_dpoints)
    x3_negatively_correlated_with_y = rng.uniform(size=number_of_dpoints)
    x = np.column_stack(
        (
            x1_positively_correlated_with_y,
            x2_negatively_correlated_with_y,
            categorize(x3_negatively_correlated_with_y) if x3_to_category else x3_negatively_correlated_with_y,
        )
    )

    zs = rng.normal(loc=0.0, scale=0.01, size=number_of_dpoints)
    scales = 10.0 * (rng.uniform(size=6) + 0.5)
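    # y increases with x1 and decreases with x2 and x3 (plus sinusoidal wiggles and
    # a little noise), which makes monotone constraints like [1, -1, 0] meaningful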
    y = (
        scales[0] * x1_positively_correlated_with_y
        + np.sin(scales[1] * np.pi * x1_positively_correlated_with_y)
        - scales[2] * x2_negatively_correlated_with_y
        - np.cos(scales[3] * np.pi * x2_negatively_correlated_with_y)
        - scales[4] * x3_negatively_correlated_with_y
        - np.cos(scales[5] * np.pi * x3_negatively_correlated_with_y)
        + zs
    )
    categorical_features = []
    if x3_to_category:
        categorical_features = [2]
    return lgb.Dataset(x, label=y, categorical_feature=categorical_features, free_raw_data=False)


@pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Monotone constraints are not yet supported by CUDA version")
@pytest.mark.parametrize("test_with_categorical_variable", [True, False])
def test_monotone_constraints(test_with_categorical_variable):
    def is_increasing(y):
        return (np.diff(y) >= 0.0).all()

    def is_decreasing(y):
        return (np.diff(y) <= 0.0).all()

    def is_non_monotone(y):
        return (np.diff(y) < 0.0).any() and (np.diff(y) > 0.0).any()

    def is_correctly_constrained(learner, x3_to_category=True):
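        # probe predictions on grids where one feature varies while the others are held
        # fixed: x1 must be monotonically increasing, x2 decreasing, x3 non-monotone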
        iterations = 10
        n = 1000
        variable_x = np.linspace(0, 1, n).reshape((n, 1))
        fixed_xs_values = np.linspace(0, 1, n)
        for i in range(iterations):
            fixed_x = fixed_xs_values[i] * np.ones((n, 1))
            monotonically_increasing_x = np.column_stack((variable_x, fixed_x, fixed_x))
            monotonically_increasing_y = learner.predict(monotonically_increasing_x)
            monotonically_decreasing_x = np.column_stack((fixed_x, variable_x, fixed_x))
            monotonically_decreasing_y = learner.predict(monotonically_decreasing_x)
            non_monotone_x = np.column_stack(
                (
                    fixed_x,
                    fixed_x,
                    categorize(variable_x) if x3_to_category else variable_x,
                )
            )
            non_monotone_y = learner.predict(non_monotone_x)
            if not (
                is_increasing(monotonically_increasing_y)
                and is_decreasing(monotonically_decreasing_y)
                and is_non_monotone(non_monotone_y)
            ):
                return False
        return True

    def are_interactions_enforced(gbm, feature_sets):
        def parse_tree_features(gbm):
            # trees start at position 1.
            tree_str = gbm.model_to_string().split("Tree")[1:]
            feature_sets = []
            for tree in tree_str:
                # split_features are in 4th line.
                features = tree.splitlines()[3].split("=")[1].split(" ")
                features = {f"Column_{f}" for f in features}
                feature_sets.append(features)
            return np.array(feature_sets)

        def has_interaction(treef):
            n = 0
            for fs in feature_sets:
                if len(treef.intersection(fs)) > 0:
                    n += 1
            return n > 1

        tree_features = parse_tree_features(gbm)
        has_interaction_flag = np.array([has_interaction(treef) for treef in tree_features])

        return not has_interaction_flag.any()

    trainset = generate_trainset_for_monotone_constraints_tests(test_with_categorical_variable)
    for test_with_interaction_constraints in [True, False]:
        error_msg = (
            f"Model not correctly constrained (test_with_interaction_constraints={test_with_interaction_constraints})"
        )
        for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
            params = {
                "min_data": 20,
                "num_leaves": 20,
                "monotone_constraints": [1, -1, 0],
                "monotone_constraints_method": monotone_constraints_method,
                "use_missing": False,
            }
            if test_with_interaction_constraints:
                params["interaction_constraints"] = [[0], [1], [2]]
            constrained_model = lgb.train(params, trainset)
            assert is_correctly_constrained(constrained_model, test_with_categorical_variable), error_msg
            if test_with_interaction_constraints:
                feature_sets = [["Column_0"], ["Column_1"], ["Column_2"]]
                assert are_interactions_enforced(constrained_model, feature_sets)


@pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Monotone constraints are not yet supported by CUDA version")
def test_monotone_penalty():
    def are_first_splits_non_monotone(tree, n, monotone_constraints):
        if n <= 0:
            return True
        if "leaf_value" in tree:
            return True
        if monotone_constraints[tree["split_feature"]] != 0:
            return False
        return are_first_splits_non_monotone(
            tree["left_child"], n - 1, monotone_constraints
        ) and are_first_splits_non_monotone(tree["right_child"], n - 1, monotone_constraints)

    def are_there_monotone_splits(tree, monotone_constraints):
        if "leaf_value" in tree:
            return False
        if monotone_constraints[tree["split_feature"]] != 0:
            return True
        return are_there_monotone_splits(tree["left_child"], monotone_constraints) or are_there_monotone_splits(
            tree["right_child"], monotone_constraints
        )

    max_depth = 5
    monotone_constraints = [1, -1, 0]
    penalization_parameter = 2.0
    trainset = generate_trainset_for_monotone_constraints_tests(x3_to_category=False)
    for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
        params = {
            "max_depth": max_depth,
            "monotone_constraints": monotone_constraints,
            "monotone_penalty": penalization_parameter,
            "monotone_constraints_method": monotone_constraints_method,
        }
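        # the first int(penalization_parameter) levels of each tree should be free of
        # monotone splits, while monotone splits should still appear further down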
        constrained_model = lgb.train(params, trainset, 10)
        dumped_model = constrained_model.dump_model()["tree_info"]
        for tree in dumped_model:
            assert are_first_splits_non_monotone(
                tree["tree_structure"], int(penalization_parameter), monotone_constraints
            )
            assert are_there_monotone_splits(tree["tree_structure"], monotone_constraints)


# test if a penalty as high as the depth indeed prohibits all monotone splits
@pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Monotone constraints are not yet supported by CUDA version")
def test_monotone_penalty_max():
    max_depth = 5
    monotone_constraints = [1, -1, 0]
    penalization_parameter = max_depth
    trainset_constrained_model = generate_trainset_for_monotone_constraints_tests(x3_to_category=False)
    x = trainset_constrained_model.data
    y = trainset_constrained_model.label
    x3_negatively_correlated_with_y = x[:, 2]
    trainset_unconstrained_model = lgb.Dataset(x3_negatively_correlated_with_y.reshape(-1, 1), label=y)
    params_constrained_model = {
        "monotone_constraints": monotone_constraints,
        "monotone_penalty": penalization_parameter,
        "max_depth": max_depth,
        "gpu_use_dp": True,
    }
    params_unconstrained_model = {
        "max_depth": max_depth,
        "gpu_use_dp": True,
    }

    unconstrained_model = lgb.train(params_unconstrained_model, trainset_unconstrained_model, 10)
    unconstrained_model_predictions = unconstrained_model.predict(x3_negatively_correlated_with_y.reshape(-1, 1))

    for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
        params_constrained_model["monotone_constraints_method"] = monotone_constraints_method
        # The penalization is so high that the first 2 features should not be used here
        constrained_model = lgb.train(params_constrained_model, trainset_constrained_model, 10)

        # Check that a very high penalization is the same as not using the features at all
        np_assert_array_equal(constrained_model.predict(x), unconstrained_model_predictions, strict=True)


def test_max_bin_by_feature():
    col1 = np.arange(0, 100)[:, np.newaxis]
    col2 = np.zeros((100, 1))
    col2[20:] = 1
    X = np.concatenate([col1, col2], axis=1)
    y = np.arange(0, 100)
    params = {
        "objective": "regression_l2",
        "verbose": -1,
        "num_leaves": 100,
        "min_data_in_leaf": 1,
        "min_sum_hessian_in_leaf": 0,
        "min_data_in_bin": 1,
        "max_bin_by_feature": [100, 2],
    }
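    # 100 bins give every distinct value of the first feature its own bin, so a single
    # tree (100 leaves, min_data_in_leaf=1) can isolate all 100 targets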
    lgb_data = lgb.Dataset(X, label=y)
    est = lgb.train(params, lgb_data, num_boost_round=1)
    assert len(np.unique(est.predict(X))) == 100
    params["max_bin_by_feature"] = [2, 100]
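    # with only 2 bins for the first feature, far fewer distinct predictions are possible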
    lgb_data = lgb.Dataset(X, label=y)
    est = lgb.train(params, lgb_data, num_boost_round=1)
    assert len(np.unique(est.predict(X))) == 3


def test_small_max_bin(rng_fixed_seed):
    y = rng_fixed_seed.choice([0, 1], 100)
    x = np.ones((100, 1))
    x[:30, 0] = -1
    x[60:, 0] = 2
    params = {"objective": "binary", "seed": 0, "min_data_in_leaf": 1, "verbose": -1, "max_bin": 2}
    lgb_x = lgb.Dataset(x, label=y)
    lgb.train(params, lgb_x, num_boost_round=5)
    x[0, 0] = np.nan
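    # the NaN gets its own missing-value bin, so the test allows one more bin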
    params["max_bin"] = 3
    lgb_x = lgb.Dataset(x, label=y)
    lgb.train(params, lgb_x, num_boost_round=5)


def test_refit():
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {"objective": "binary", "metric": "binary_logloss", "verbose": -1, "min_data": 10}
    lgb_train = lgb.Dataset(X_train, y_train)
    gbm = lgb.train(params, lgb_train, num_boost_round=20)
    err_pred = log_loss(y_test, gbm.predict(X_test))
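    # refit re-estimates the leaf values on the new data, so error on that data should drop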
    new_gbm = gbm.refit(X_test, y_test)
    new_err_pred = log_loss(y_test, new_gbm.predict(X_test))
    assert err_pred > new_err_pred


def test_refit_with_one_tree_regression():
    X, y = make_synthetic_regression(n_samples=1_000, n_features=2)
    lgb_train = lgb.Dataset(X, label=y)
    params = {"objective": "regression", "verbosity": -1}
    model = lgb.train(params, lgb_train, num_boost_round=1)
    model_refit = model.refit(X, y)
    assert isinstance(model_refit, lgb.Booster)


def test_refit_with_one_tree_binary_classification():
    X, y = load_breast_cancer(return_X_y=True)
    lgb_train = lgb.Dataset(X, label=y)
    params = {"objective": "binary", "verbosity": -1}
    model = lgb.train(params, lgb_train, num_boost_round=1)
    model_refit = model.refit(X, y)
    assert isinstance(model_refit, lgb.Booster)


def test_refit_with_one_tree_multiclass_classification():
    X, y = load_iris(return_X_y=True)
    lgb_train = lgb.Dataset(X, y)
    params = {"objective": "multiclass", "num_class": 3, "verbose": -1}
    model = lgb.train(params, lgb_train, num_boost_round=1)
    model_refit = model.refit(X, y)
    assert isinstance(model_refit, lgb.Booster)


def test_refit_dataset_params(rng):
    # check refit accepts dataset_params
    X, y = load_breast_cancer(return_X_y=True)
    lgb_train = lgb.Dataset(X, y, init_score=np.zeros(y.size))
    train_params = {"objective": "binary", "verbose": -1, "seed": 123}
    gbm = lgb.train(train_params, lgb_train, num_boost_round=10)
    non_weight_err_pred = log_loss(y, gbm.predict(X))
    refit_weight = rng.uniform(size=(y.shape[0],))
    dataset_params = {
        "max_bin": 260,
        "min_data_in_bin": 5,
        "data_random_seed": 123,
    }
    new_gbm = gbm.refit(
        data=X,
        label=y,
        weight=refit_weight,
        dataset_params=dataset_params,
        decay_rate=0.0,
    )
    weight_err_pred = log_loss(y, new_gbm.predict(X))
    train_set_params = new_gbm.train_set.get_params()
    stored_weights = new_gbm.train_set.get_weight()
    assert weight_err_pred != non_weight_err_pred
    assert train_set_params["max_bin"] == 260
    assert train_set_params["min_data_in_bin"] == 5
    assert train_set_params["data_random_seed"] == 123
    np.testing.assert_allclose(stored_weights, refit_weight)


@pytest.mark.parametrize("boosting_type", ["rf", "dart"])
def test_mape_for_specific_boosting_types(boosting_type):
    X, y = make_synthetic_regression()
    y = abs(y)
    params = {
        "boosting_type": boosting_type,
        "objective": "mape",
        "verbose": -1,
        "bagging_freq": 1,
        "bagging_fraction": 0.8,
        "feature_fraction": 0.8,
        "boost_from_average": True,
    }
    lgb_train = lgb.Dataset(X, y)
    gbm = lgb.train(params, lgb_train, num_boost_round=20)
    pred = gbm.predict(X)
    pred_mean = pred.mean()
    # the following checks that dart and rf with mape can predict outside the 0-1 range
    # https://github.com/microsoft/LightGBM/issues/1579
    assert pred_mean > 8


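# a single constant feature allows no splits, so boosting should collapse to the
# objective's baseline prediction (label mean, class frequencies, etc.)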
def check_constant_features(y_true, expected_pred, more_params):
    X_train = np.ones((len(y_true), 1))
    y_train = np.array(y_true)
    params = {
        "objective": "regression",
        "num_class": 1,
        "verbose": -1,
        "min_data": 1,
        "num_leaves": 2,
        "learning_rate": 1,
        "min_data_in_bin": 1,
        "boost_from_average": True,
    }
    params.update(more_params)
    lgb_train = lgb.Dataset(X_train, y_train, params=params)
    gbm = lgb.train(params, lgb_train, num_boost_round=2)
    pred = gbm.predict(X_train)
    assert np.allclose(pred, expected_pred)


def test_constant_features_regression():
    params = {"objective": "regression"}
    check_constant_features([0.0, 10.0, 0.0, 10.0], 5.0, params)
    check_constant_features([0.0, 1.0, 2.0, 3.0], 1.5, params)
    check_constant_features([-1.0, 1.0, -2.0, 2.0], 0.0, params)


def test_constant_features_binary():
    params = {"objective": "binary"}
    check_constant_features([0.0, 10.0, 0.0, 10.0], 0.5, params)
    check_constant_features([0.0, 1.0, 2.0, 3.0], 0.75, params)


def test_constant_features_multiclass():
    params = {"objective": "multiclass", "num_class": 3}
    check_constant_features([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25], params)
    check_constant_features([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25], params)


def test_constant_features_multiclassova():
    params = {"objective": "multiclassova", "num_class": 3}
    check_constant_features([0.0, 1.0, 2.0, 0.0], [0.5, 0.25, 0.25], params)
    check_constant_features([0.0, 1.0, 2.0, 1.0], [0.25, 0.5, 0.25], params)


def test_fpreproc():
    def preprocess_data(dtrain, dtest, params):
        train_data = dtrain.construct().get_data()
        test_data = dtest.construct().get_data()
        train_data[:, 0] += 1
        test_data[:, 0] += 1
        dtrain.label[-5:] = 3
        dtest.label[-5:] = 3
        dtrain = lgb.Dataset(train_data, dtrain.label)
        dtest = lgb.Dataset(test_data, dtest.label, reference=dtrain)
        params["num_class"] = 4
        return dtrain, dtest, params

    X, y = load_iris(return_X_y=True)
    dataset = lgb.Dataset(X, y, free_raw_data=False)
    params = {"objective": "multiclass", "num_class": 3, "verbose": -1}
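    # fpreproc is applied to each fold's train/valid Datasets and params right before
    # training; here it shifts a feature, relabels some rows, and bumps num_class to 4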
    results = lgb.cv(params, dataset, num_boost_round=10, fpreproc=preprocess_data)
    assert "valid multi_logloss-mean" in results
    assert len(results["valid multi_logloss-mean"]) == 10


def test_metrics():
    X, y = load_digits(n_class=2, return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train)

    evals_result = {}
    params_dummy_obj_verbose = {"verbose": -1, "objective": dummy_obj}
    params_obj_verbose = {"objective": "binary", "verbose": -1}
    params_obj_metric_log_verbose = {"objective": "binary", "metric": "binary_logloss", "verbose": -1}
    params_obj_metric_err_verbose = {"objective": "binary", "metric": "binary_error", "verbose": -1}
    params_obj_metric_inv_verbose = {"objective": "binary", "metric": "invalid_metric", "verbose": -1}
    params_obj_metric_quant_verbose = {"objective": "regression", "metric": "quantile", "verbose": 2}
    params_obj_metric_multi_verbose = {
        "objective": "binary",
        "metric": ["binary_logloss", "binary_error"],
        "verbose": -1,
    }
    params_obj_metric_none_verbose = {"objective": "binary", "metric": "None", "verbose": -1}
    params_dummy_obj_metric_log_verbose = {"objective": dummy_obj, "metric": "binary_logloss", "verbose": -1}
    params_dummy_obj_metric_err_verbose = {"objective": dummy_obj, "metric": "binary_error", "verbose": -1}
    params_dummy_obj_metric_inv_verbose = {"objective": dummy_obj, "metric_types": "invalid_metric", "verbose": -1}
    params_dummy_obj_metric_multi_verbose = {
        "objective": dummy_obj,
        "metric": ["binary_logloss", "binary_error"],
        "verbose": -1,
    }
    params_dummy_obj_metric_none_verbose = {"objective": dummy_obj, "metric": "None", "verbose": -1}
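    # each evaluated metric contributes two entries to the cv result dict
    # ("valid <metric>-mean" and "valid <metric>-stdv"), hence the len(res) checks below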

    def get_cv_result(params=params_obj_verbose, **kwargs):
        return lgb.cv(params, lgb_train, num_boost_round=2, **kwargs)

    def train_booster(params=params_obj_verbose, **kwargs):
        lgb.train(
            params,
            lgb_train,
            num_boost_round=2,
            valid_sets=[lgb_valid],
            callbacks=[lgb.record_evaluation(evals_result)],
            **kwargs,
        )
    # no custom objective, no feval
    # default metric
    res = get_cv_result()
    assert len(res) == 2
    assert "valid binary_logloss-mean" in res

    # non-default metric in params
    res = get_cv_result(params=params_obj_metric_err_verbose)
    assert len(res) == 2
    assert "valid binary_error-mean" in res

    # default metric in args
    res = get_cv_result(metrics="binary_logloss")
    assert len(res) == 2
    assert "valid binary_logloss-mean" in res

    # non-default metric in args
    res = get_cv_result(metrics="binary_error")
    assert len(res) == 2
    assert "valid binary_error-mean" in res

    # metric in args overwrites one in params
2577
    res = get_cv_result(params=params_obj_metric_inv_verbose, metrics="binary_error")
    assert len(res) == 2
    assert "valid binary_error-mean" in res

    # non-default metric in params for non-default objective
    res = get_cv_result(params=params_obj_metric_quant_verbose)
    assert len(res) == 2
    assert "valid quantile-mean" in res

    # multiple metrics in params
    res = get_cv_result(params=params_obj_metric_multi_verbose)
    assert len(res) == 4
    assert "valid binary_logloss-mean" in res
    assert "valid binary_error-mean" in res

    # multiple metrics in args
    res = get_cv_result(metrics=["binary_logloss", "binary_error"])
    assert len(res) == 4
    assert "valid binary_logloss-mean" in res
    assert "valid binary_error-mean" in res

    # remove default metric by 'None' in list
    res = get_cv_result(metrics=["None"])
    assert len(res) == 0

    # remove default metric by 'None' aliases
    for na_alias in ("None", "na", "null", "custom"):
        res = get_cv_result(metrics=na_alias)
        assert len(res) == 0

    # custom objective, no feval
    # no default metric
    res = get_cv_result(params=params_dummy_obj_verbose)
    assert len(res) == 0

    # metric in params
    res = get_cv_result(params=params_dummy_obj_metric_err_verbose)
    assert len(res) == 2
    assert "valid binary_error-mean" in res

    # metric in args
    res = get_cv_result(params=params_dummy_obj_verbose, metrics="binary_error")
    assert len(res) == 2
    assert "valid binary_error-mean" in res

    # metric in args overwrites its alias in params
    res = get_cv_result(params=params_dummy_obj_metric_inv_verbose, metrics="binary_error")
    assert len(res) == 2
    assert "valid binary_error-mean" in res

    # multiple metrics in params
    res = get_cv_result(params=params_dummy_obj_metric_multi_verbose)
    assert len(res) == 4
    assert "valid binary_logloss-mean" in res
    assert "valid binary_error-mean" in res

    # multiple metrics in args
    res = get_cv_result(params=params_dummy_obj_verbose, metrics=["binary_logloss", "binary_error"])
    assert len(res) == 4
    assert "valid binary_logloss-mean" in res
    assert "valid binary_error-mean" in res

    # no custom objective, feval
    # default metric with custom one
    res = get_cv_result(feval=constant_metric)
    assert len(res) == 4
    assert "valid binary_logloss-mean" in res
    assert "valid error-mean" in res

    # non-default metric in params with custom one
    res = get_cv_result(params=params_obj_metric_err_verbose, feval=constant_metric)
    assert len(res) == 4
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # default metric in args with custom one
    res = get_cv_result(metrics="binary_logloss", feval=constant_metric)
    assert len(res) == 4
    assert "valid binary_logloss-mean" in res
    assert "valid error-mean" in res

    # default metric in args with 1 custom function returning a list of 2 metrics
    res = get_cv_result(metrics="binary_logloss", feval=constant_metric_multi)
    assert len(res) == 6
    assert "valid binary_logloss-mean" in res
    assert res["valid important_metric-mean"] == [1.5, 1.5]
    assert res["valid irrelevant_metric-mean"] == [7.8, 7.8]

    # non-default metric in args with custom one
    res = get_cv_result(metrics="binary_error", feval=constant_metric)
    assert len(res) == 4
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # metric in args overwrites one in params, custom one is evaluated too
    res = get_cv_result(params=params_obj_metric_inv_verbose, metrics="binary_error", feval=constant_metric)
    assert len(res) == 4
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # multiple metrics in params with custom one
    res = get_cv_result(params=params_obj_metric_multi_verbose, feval=constant_metric)
    assert len(res) == 6
    assert "valid binary_logloss-mean" in res
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # multiple metrics in args with custom one
    res = get_cv_result(metrics=["binary_logloss", "binary_error"], feval=constant_metric)
    assert len(res) == 6
    assert "valid binary_logloss-mean" in res
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # custom metric is evaluated despite 'None' being passed
    res = get_cv_result(metrics=["None"], feval=constant_metric)
    assert len(res) == 2
    assert "valid error-mean" in res

    # custom objective, feval
    # no default metric, only custom one
    res = get_cv_result(params=params_dummy_obj_verbose, feval=constant_metric)
    assert len(res) == 2
    assert "valid error-mean" in res

    # metric in params with custom one
    res = get_cv_result(params=params_dummy_obj_metric_err_verbose, feval=constant_metric)
    assert len(res) == 4
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # metric in args with custom one
    res = get_cv_result(params=params_dummy_obj_verbose, feval=constant_metric, metrics="binary_error")
    assert len(res) == 4
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # metric in args overwrites one in params, custom one is evaluated too
    res = get_cv_result(params=params_dummy_obj_metric_inv_verbose, feval=constant_metric, metrics="binary_error")
    assert len(res) == 4
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # multiple metrics in params with custom one
    res = get_cv_result(params=params_dummy_obj_metric_multi_verbose, feval=constant_metric)
    assert len(res) == 6
    assert "valid binary_logloss-mean" in res
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # multiple metrics in args with custom one
    res = get_cv_result(
        params=params_dummy_obj_verbose, feval=constant_metric, metrics=["binary_logloss", "binary_error"]
    )
    assert len(res) == 6
    assert "valid binary_logloss-mean" in res
    assert "valid binary_error-mean" in res
    assert "valid error-mean" in res

    # custom metric is evaluated despite 'None' being passed
    res = get_cv_result(params=params_dummy_obj_metric_none_verbose, feval=constant_metric)
    assert len(res) == 2
    assert "valid error-mean" in res

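    # note: record_evaluation clears evals_result at the start of each run,
    # so every train_booster call below is checked in isolation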
    # no custom objective, no feval
    # default metric
    train_booster()
    assert len(evals_result["valid_0"]) == 1
    assert "binary_logloss" in evals_result["valid_0"]

    # default metric in params
    train_booster(params=params_obj_metric_log_verbose)
    assert len(evals_result["valid_0"]) == 1
    assert "binary_logloss" in evals_result["valid_0"]

    # non-default metric in params
    train_booster(params=params_obj_metric_err_verbose)
    assert len(evals_result["valid_0"]) == 1
    assert "binary_error" in evals_result["valid_0"]

    # multiple metrics in params
    train_booster(params=params_obj_metric_multi_verbose)
    assert len(evals_result["valid_0"]) == 2
    assert "binary_logloss" in evals_result["valid_0"]
    assert "binary_error" in evals_result["valid_0"]

    # remove default metric by 'None' aliases
    for na_alias in ("None", "na", "null", "custom"):
        params = {"objective": "binary", "metric": na_alias, "verbose": -1}
        train_booster(params=params)
        assert len(evals_result) == 0

    # custom objective, no feval
    # no default metric
    train_booster(params=params_dummy_obj_verbose)
    assert len(evals_result) == 0

    # metric in params
    train_booster(params=params_dummy_obj_metric_log_verbose)
    assert len(evals_result["valid_0"]) == 1
    assert "binary_logloss" in evals_result["valid_0"]

    # multiple metrics in params
    train_booster(params=params_dummy_obj_metric_multi_verbose)
    assert len(evals_result["valid_0"]) == 2
    assert "binary_logloss" in evals_result["valid_0"]
    assert "binary_error" in evals_result["valid_0"]

    # no custom objective, feval
    # default metric with custom one
    train_booster(feval=constant_metric)
    assert len(evals_result["valid_0"]) == 2
    assert "binary_logloss" in evals_result["valid_0"]
    assert "error" in evals_result["valid_0"]

    # default metric in params with custom one
    train_booster(params=params_obj_metric_log_verbose, feval=constant_metric)
    assert len(evals_result["valid_0"]) == 2
    assert "binary_logloss" in evals_result["valid_0"]
    assert "error" in evals_result["valid_0"]

    # default metric in params with custom function returning a list of 2 metrics
    train_booster(params=params_obj_metric_log_verbose, feval=constant_metric_multi)
    assert len(evals_result["valid_0"]) == 3
    assert "binary_logloss" in evals_result["valid_0"]
    assert evals_result["valid_0"]["important_metric"] == [1.5, 1.5]
    assert evals_result["valid_0"]["irrelevant_metric"] == [7.8, 7.8]

    # non-default metric in params with custom one
    train_booster(params=params_obj_metric_err_verbose, feval=constant_metric)
    assert len(evals_result["valid_0"]) == 2
    assert "binary_error" in evals_result["valid_0"]
    assert "error" in evals_result["valid_0"]

    # multiple metrics in params with custom one
    train_booster(params=params_obj_metric_multi_verbose, feval=constant_metric)
    assert len(evals_result["valid_0"]) == 3
    assert "binary_logloss" in evals_result["valid_0"]
    assert "binary_error" in evals_result["valid_0"]
    assert "error" in evals_result["valid_0"]

    # custom metric is evaluated despite 'None' being passed
    train_booster(params=params_obj_metric_none_verbose, feval=constant_metric)
    assert len(evals_result) == 1
    assert "error" in evals_result["valid_0"]

    # custom objective, feval
    # no default metric, only custom one
    train_booster(params=params_dummy_obj_verbose, feval=constant_metric)
    assert len(evals_result["valid_0"]) == 1
    assert "error" in evals_result["valid_0"]

    # metric in params with custom one
    train_booster(params=params_dummy_obj_metric_log_verbose, feval=constant_metric)
    assert len(evals_result["valid_0"]) == 2
    assert "binary_logloss" in evals_result["valid_0"]
    assert "error" in evals_result["valid_0"]

    # multiple metrics in params with custom one
    train_booster(params=params_dummy_obj_metric_multi_verbose, feval=constant_metric)
    assert len(evals_result["valid_0"]) == 3
    assert "binary_logloss" in evals_result["valid_0"]
    assert "binary_error" in evals_result["valid_0"]
    assert "error" in evals_result["valid_0"]

    # custom metric is evaluated despite 'None' being passed
    train_booster(params=params_dummy_obj_metric_none_verbose, feval=constant_metric)
    assert len(evals_result) == 1
    assert "error" in evals_result["valid_0"]

    X, y = load_digits(n_class=3, return_X_y=True)
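    # from here on, use a 3-class problem to exercise the multiclass
    # objective aliases and num_class validation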
    lgb_train = lgb.Dataset(X, y)

    obj_multi_aliases = ["multiclass", "softmax", "multiclassova", "multiclass_ova", "ova", "ovr"]
    for obj_multi_alias in obj_multi_aliases:
        # Custom objective replaces multiclass
        params_obj_class_3_verbose = {"objective": obj_multi_alias, "num_class": 3, "verbose": -1}
        params_dummy_obj_class_3_verbose = {"objective": dummy_obj, "num_class": 3, "verbose": -1}
        params_dummy_obj_class_1_verbose = {"objective": dummy_obj, "num_class": 1, "verbose": -1}
        params_obj_verbose = {"objective": obj_multi_alias, "verbose": -1}
        params_dummy_obj_verbose = {"objective": dummy_obj, "verbose": -1}
        # multiclass default metric
        res = get_cv_result(params_obj_class_3_verbose)
        assert len(res) == 2
        assert "valid multi_logloss-mean" in res
        # multiclass default metric with custom one
        res = get_cv_result(params_obj_class_3_verbose, feval=constant_metric)
        assert len(res) == 4
        assert "valid multi_logloss-mean" in res
        assert "valid error-mean" in res
        # no default metric for custom objective, only custom one
        res = get_cv_result(params_dummy_obj_class_3_verbose, feval=constant_metric)
        assert len(res) == 2
        assert "valid error-mean" in res
        # no metric for invalid class_num
        res = get_cv_result(params_dummy_obj_class_1_verbose)
        assert len(res) == 0
        # custom metric for invalid class_num
        res = get_cv_result(params_dummy_obj_class_1_verbose, feval=constant_metric)
        assert len(res) == 2
        assert "valid error-mean" in res
        # multiclass metric alias with custom one with invalid class_num
        with pytest.raises(lgb.basic.LightGBMError, match="Multiclass objective and metrics don't match"):
            get_cv_result(params_dummy_obj_class_1_verbose, metrics=obj_multi_alias, feval=constant_metric)
        # multiclass default metric without num_class
        with pytest.raises(
            lgb.basic.LightGBMError,
            match="Number of classes should be specified and greater than 1 for multiclass training",
        ):
            get_cv_result(params_obj_verbose)
        for metric_multi_alias in obj_multi_aliases + ["multi_logloss"]:
            # multiclass metric alias
            res = get_cv_result(params_obj_class_3_verbose, metrics=metric_multi_alias)
            assert len(res) == 2
            assert "valid multi_logloss-mean" in res
        # multiclass metric
        res = get_cv_result(params_obj_class_3_verbose, metrics="multi_error")
        assert len(res) == 2
        assert "valid multi_error-mean" in res
        # non-valid metric for multiclass objective
        with pytest.raises(lgb.basic.LightGBMError, match="Multiclass objective and metrics don't match"):
            get_cv_result(params_obj_class_3_verbose, metrics="binary_logloss")
    params_class_3_verbose = {"num_class": 3, "verbose": -1}
    # non-default num_class for default objective
    with pytest.raises(lgb.basic.LightGBMError, match="Number of classes must be 1 for non-multiclass training"):
        get_cv_result(params_class_3_verbose)
    # no metric with non-default num_class for custom objective
    res = get_cv_result(params_dummy_obj_class_3_verbose)
    assert len(res) == 0
    for metric_multi_alias in obj_multi_aliases + ["multi_logloss"]:
        # multiclass metric alias for custom objective
        res = get_cv_result(params_dummy_obj_class_3_verbose, metrics=metric_multi_alias)
        assert len(res) == 2
        assert "valid multi_logloss-mean" in res
    # multiclass metric for custom objective
    res = get_cv_result(params_dummy_obj_class_3_verbose, metrics="multi_error")
    assert len(res) == 2
    assert "valid multi_error-mean" in res
    # binary metric with non-default num_class for custom objective
    with pytest.raises(lgb.basic.LightGBMError, match="Multiclass objective and metrics don't match"):
        get_cv_result(params_dummy_obj_class_3_verbose, metrics="binary_error")


def test_multiple_feval_train():
    X, y = load_breast_cancer(return_X_y=True)

    params = {"verbose": -1, "objective": "binary", "metric": "binary_logloss"}

    X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.2)

    train_dataset = lgb.Dataset(data=X_train, label=y_train)
    validation_dataset = lgb.Dataset(data=X_validation, label=y_validation, reference=train_dataset)
    evals_result = {}
    lgb.train(
        params=params,
        train_set=train_dataset,
        valid_sets=validation_dataset,
        num_boost_round=5,
        feval=[constant_metric, decreasing_metric],
        callbacks=[lgb.record_evaluation(evals_result)],
    )

    assert len(evals_result["valid_0"]) == 3
    assert "binary_logloss" in evals_result["valid_0"]
    assert "error" in evals_result["valid_0"]
    assert "decreasing_metric" in evals_result["valid_0"]


def test_objective_callable_train_binary_classification():
    X, y = load_breast_cancer(return_X_y=True)
    params = {"verbose": -1, "objective": logloss_obj, "learning_rate": 0.01}
    train_dataset = lgb.Dataset(X, y)
    booster = lgb.train(params=params, train_set=train_dataset, num_boost_round=20)
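    # a callable objective is recorded as objective="none" and predict()
    # returns raw scores, so apply the sigmoid before computing metrics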
    y_pred = logistic_sigmoid(booster.predict(X))
    logloss_error = log_loss(y, y_pred)
    rocauc_error = roc_auc_score(y, y_pred)
    assert booster.params["objective"] == "none"
    assert logloss_error == pytest.approx(0.547907)
    assert rocauc_error == pytest.approx(0.995944)


def test_objective_callable_train_regression():
    X, y = make_synthetic_regression()
    params = {"verbose": -1, "objective": mse_obj}
    lgb_train = lgb.Dataset(X, y)
    booster = lgb.train(params, lgb_train, num_boost_round=20)
    y_pred = booster.predict(X)
    mse_error = mean_squared_error(y, y_pred)
    assert booster.params["objective"] == "none"
    assert mse_error == pytest.approx(286.724194)


def test_objective_callable_cv_binary_classification():
    X, y = load_breast_cancer(return_X_y=True)
    params = {"verbose": -1, "objective": logloss_obj, "learning_rate": 0.01}
    train_dataset = lgb.Dataset(X, y)
    cv_res = lgb.cv(params, train_dataset, num_boost_round=20, nfold=3, return_cvbooster=True)
    cv_booster = cv_res["cvbooster"].boosters
    cv_logloss_errors = [log_loss(y, logistic_sigmoid(cb.predict(X))) < 0.56 for cb in cv_booster]
    cv_objs = [cb.params["objective"] == "none" for cb in cv_booster]
    assert all(cv_objs)
    assert all(cv_logloss_errors)


def test_objective_callable_cv_regression():
    X, y = make_synthetic_regression()
    lgb_train = lgb.Dataset(X, y)
    params = {"verbose": -1, "objective": mse_obj}
    cv_res = lgb.cv(params, lgb_train, num_boost_round=20, nfold=3, stratified=False, return_cvbooster=True)
    cv_booster = cv_res["cvbooster"].boosters
    cv_mse_errors = [mean_squared_error(y, cb.predict(X)) < 463 for cb in cv_booster]
    cv_objs = [cb.params["objective"] == "none" for cb in cv_booster]
    assert all(cv_objs)
    assert all(cv_mse_errors)


def test_multiple_feval_cv():
    X, y = load_breast_cancer(return_X_y=True)

    params = {"verbose": -1, "objective": "binary", "metric": "binary_logloss"}

    train_dataset = lgb.Dataset(data=X, label=y)

    cv_results = lgb.cv(
        params=params, train_set=train_dataset, num_boost_round=5, feval=[constant_metric, decreasing_metric]
    )

    # Expect three metrics, with a mean and a stdv entry for each
    assert len(cv_results) == 6
    assert "valid binary_logloss-mean" in cv_results
    assert "valid error-mean" in cv_results
    assert "valid decreasing_metric-mean" in cv_results
    assert "valid binary_logloss-stdv" in cv_results
    assert "valid error-stdv" in cv_results
    assert "valid decreasing_metric-stdv" in cv_results


def test_default_objective_and_metric():
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    train_dataset = lgb.Dataset(data=X_train, label=y_train)
    validation_dataset = lgb.Dataset(data=X_test, label=y_test, reference=train_dataset)
    evals_result = {}
    params = {"verbose": -1}
    lgb.train(
        params=params,
        train_set=train_dataset,
        valid_sets=validation_dataset,
        num_boost_round=5,
        callbacks=[lgb.record_evaluation(evals_result)],
    )

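    # with no objective given, training falls back to regression,
    # so l2 is the recorded default metric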
    assert "valid_0" in evals_result
    assert len(evals_result["valid_0"]) == 1
    assert "l2" in evals_result["valid_0"]
    assert len(evals_result["valid_0"]["l2"]) == 5


@pytest.mark.parametrize("use_weight", [True, False])
def test_multiclass_custom_objective(use_weight):
    def custom_obj(y_pred, ds):
        y_true = ds.get_label()
        weight = ds.get_weight()
        grad, hess = sklearn_multiclass_custom_objective(y_true, y_pred, weight)
        return grad, hess

    centers = [[-4, -4], [4, 4], [-4, 4]]
    X, y = make_blobs(n_samples=1_000, centers=centers, random_state=42)
    weight = np.full_like(y, 2)
    ds = lgb.Dataset(X, y)
    if use_weight:
        ds.set_weight(weight)
    params = {"objective": "multiclass", "num_class": 3, "num_leaves": 7}
    builtin_obj_bst = lgb.train(params, ds, num_boost_round=10)
    builtin_obj_preds = builtin_obj_bst.predict(X)

    params["objective"] = custom_obj
    custom_obj_bst = lgb.train(params, ds, num_boost_round=10)
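    # the custom objective yields raw scores, so apply softmax before
    # comparing with the built-in multiclass probabilities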
    custom_obj_preds = softmax(custom_obj_bst.predict(X))

    np.testing.assert_allclose(builtin_obj_preds, custom_obj_preds, rtol=0.01)


@pytest.mark.parametrize("use_weight", [True, False])
def test_multiclass_custom_eval(use_weight):
    def custom_eval(y_pred, ds):
        y_true = ds.get_label()
        weight = ds.get_weight()  # weight is None when not set
        loss = log_loss(y_true, y_pred, sample_weight=weight)
        return "custom_logloss", loss, False

    centers = [[-4, -4], [4, 4], [-4, 4]]
    X, y = make_blobs(n_samples=1_000, centers=centers, random_state=42)
    weight = np.full_like(y, 2)
    X_train, X_valid, y_train, y_valid, weight_train, weight_valid = train_test_split(
        X, y, weight, test_size=0.2, random_state=0
    )
    train_ds = lgb.Dataset(X_train, y_train)
    valid_ds = lgb.Dataset(X_valid, y_valid, reference=train_ds)
    if use_weight:
        train_ds.set_weight(weight_train)
        valid_ds.set_weight(weight_valid)
    params = {"objective": "multiclass", "num_class": 3, "num_leaves": 7}
    eval_result = {}
    bst = lgb.train(
        params,
        train_ds,
        num_boost_round=10,
        valid_sets=[train_ds, valid_ds],
        valid_names=["train", "valid"],
        feval=custom_eval,
        callbacks=[lgb.record_evaluation(eval_result)],
        keep_training_booster=True,
    )

    for key, ds in zip(["train", "valid"], [train_ds, valid_ds]):
        np.testing.assert_allclose(eval_result[key]["multi_logloss"], eval_result[key]["custom_logloss"])
        _, metric, value, _ = bst.eval(ds, key, feval=custom_eval)[1]  # first element is multi_logloss
        assert metric == "custom_logloss"
        np.testing.assert_allclose(value, eval_result[key][metric][-1])


@pytest.mark.skipif(psutil.virtual_memory().available / 1024 / 1024 / 1024 < 3, reason="not enough RAM")
def test_model_size():
    X, y = make_synthetic_regression()
    data = lgb.Dataset(X, y)
    bst = lgb.train({"verbose": -1}, data, num_boost_round=2)
    y_pred = bst.predict(X)
    model_str = bst.model_to_string()
    one_tree = model_str[model_str.find("Tree=1") : model_str.find("end of trees")]
    one_tree_size = len(one_tree)
    one_tree = one_tree.replace("Tree=1", "Tree={}")
    multiplier = 100
    total_trees = multiplier + 2
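    # clone one tree `multiplier` times and pad with spaces so the model
    # string exceeds 2 GiB (2**31 bytes), exercising model_from_string on
    # very large models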
    try:
        before_tree_sizes = model_str[: model_str.find("tree_sizes")]
        trees = model_str[model_str.find("Tree=0") : model_str.find("end of trees")]
        more_trees = (one_tree * multiplier).format(*range(2, total_trees))
        after_trees = model_str[model_str.find("end of trees") :]
        num_end_spaces = 2**31 - one_tree_size * total_trees
        new_model_str = f"{before_tree_sizes}\n\n{trees}{more_trees}{after_trees}{'':{num_end_spaces}}"
        assert len(new_model_str) > 2**31
        bst.model_from_string(new_model_str)
        assert bst.num_trees() == total_trees
        y_pred_new = bst.predict(X, num_iteration=2)
        np.testing.assert_allclose(y_pred, y_pred_new)
    except MemoryError:
        pytest.skip("not enough RAM")  # pytest.skip, not unittest's skipTest


@pytest.mark.skipif(
    getenv("TASK", "") == "cuda", reason="Skip due to differences in implementation details of CUDA version"
)
def test_get_split_value_histogram(rng_fixed_seed):
    X, y = make_synthetic_regression()
    X = np.repeat(X, 3, axis=0)
    y = np.repeat(y, 3, axis=0)
    X[:, 2] = np.random.default_rng(0).integers(0, 20, size=X.shape[0])
    lgb_train = lgb.Dataset(X, y, categorical_feature=[2])
    gbm = lgb.train({"verbose": -1}, lgb_train, num_boost_round=20)
    # test XGBoost-style return value
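    # xgboost_style=True returns rows of (SplitValue, Count), as a pandas
    # DataFrame when pandas is available and as a numpy array otherwise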
    params = {"feature": 0, "xgboost_style": True}
    assert gbm.get_split_value_histogram(**params).shape == (12, 2)
    assert gbm.get_split_value_histogram(bins=999, **params).shape == (12, 2)
    assert gbm.get_split_value_histogram(bins=-1, **params).shape == (1, 2)
    assert gbm.get_split_value_histogram(bins=0, **params).shape == (1, 2)
    assert gbm.get_split_value_histogram(bins=1, **params).shape == (1, 2)
    assert gbm.get_split_value_histogram(bins=2, **params).shape == (2, 2)
    assert gbm.get_split_value_histogram(bins=6, **params).shape == (6, 2)
    assert gbm.get_split_value_histogram(bins=7, **params).shape == (7, 2)
    if lgb.compat.PANDAS_INSTALLED:
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(0, xgboost_style=True).values,
            gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True).values,
        )
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True).values,
            gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True).values,
        )
    else:
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(0, xgboost_style=True),
            gbm.get_split_value_histogram(gbm.feature_name()[0], xgboost_style=True),
        )
        np.testing.assert_allclose(
            gbm.get_split_value_histogram(X.shape[-1] - 1, xgboost_style=True),
            gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1], xgboost_style=True),
        )
    # test numpy-style return value
    hist, bins = gbm.get_split_value_histogram(0)
    assert len(hist) == 20
    assert len(bins) == 21
    hist, bins = gbm.get_split_value_histogram(0, bins=999)
    assert len(hist) == 999
    assert len(bins) == 1000
    with pytest.raises(ValueError, match="`bins` must be positive, when an integer"):
        gbm.get_split_value_histogram(0, bins=-1)
    with pytest.raises(ValueError, match="`bins` must be positive, when an integer"):
        gbm.get_split_value_histogram(0, bins=0)
    hist, bins = gbm.get_split_value_histogram(0, bins=1)
    assert len(hist) == 1
    assert len(bins) == 2
    hist, bins = gbm.get_split_value_histogram(0, bins=2)
    assert len(hist) == 2
    assert len(bins) == 3
    hist, bins = gbm.get_split_value_histogram(0, bins=6)
    assert len(hist) == 6
    assert len(bins) == 7
    hist, bins = gbm.get_split_value_histogram(0, bins=7)
    assert len(hist) == 7
    assert len(bins) == 8
    hist_idx, bins_idx = gbm.get_split_value_histogram(0)
    hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[0])
    np_assert_array_equal(hist_idx, hist_name, strict=True)
    np.testing.assert_allclose(bins_idx, bins_name)
    hist_idx, bins_idx = gbm.get_split_value_histogram(X.shape[-1] - 1)
    hist_name, bins_name = gbm.get_split_value_histogram(gbm.feature_name()[X.shape[-1] - 1])
    np_assert_array_equal(hist_idx, hist_name, strict=True)
    np.testing.assert_allclose(bins_idx, bins_name)
    # test bins string type
    hist_vals, bin_edges = gbm.get_split_value_histogram(0, bins="auto")
    hist = gbm.get_split_value_histogram(0, bins="auto", xgboost_style=True)
    if lgb.compat.PANDAS_INSTALLED:
        mask = hist_vals > 0
        # strict=False due to dtype mismatch: 'int64' and 'float64'
        np_assert_array_equal(hist_vals[mask], hist["Count"].values, strict=False)
        np.testing.assert_allclose(bin_edges[1:][mask], hist["SplitValue"].values)
    else:
        mask = hist_vals > 0
        # strict=False due to dtype mismatch: 'int64' and 'float64'
        np_assert_array_equal(hist_vals[mask], hist[:, 1], strict=False)
        np.testing.assert_allclose(bin_edges[1:][mask], hist[:, 0])
    # test histogram is disabled for categorical features
    with pytest.raises(
        lgb.basic.LightGBMError, match="Cannot compute split value histogram for the categorical feature"
    ):
        gbm.get_split_value_histogram(2)


@pytest.mark.skipif(
    getenv("TASK", "") == "cuda", reason="Skip due to differences in implementation details of CUDA version"
)
def test_early_stopping_for_only_first_metric():
    def metrics_combination_train_regression(valid_sets, metric_list, assumed_iteration, first_metric_only, feval=None):
        params = {
            "objective": "regression",
            "learning_rate": 1.1,
            "num_leaves": 10,
            "metric": metric_list,
            "verbose": -1,
            "seed": 123,
        }
        gbm = lgb.train(
            params,
            lgb_train,
            num_boost_round=25,
            valid_sets=valid_sets,
            feval=feval,
            callbacks=[lgb.early_stopping(stopping_rounds=5, first_metric_only=first_metric_only)],
        )
        assert assumed_iteration == gbm.best_iteration

    def metrics_combination_cv_regression(
        metric_list, assumed_iteration, first_metric_only, eval_train_metric, feval=None
    ):
        params = {
            "objective": "regression",
            "learning_rate": 0.9,
            "num_leaves": 10,
            "metric": metric_list,
            "verbose": -1,
            "seed": 123,
            "gpu_use_dp": True,
        }
        ret = lgb.cv(
            params,
            train_set=lgb_train,
            num_boost_round=25,
            stratified=False,
            feval=feval,
            callbacks=[lgb.early_stopping(stopping_rounds=5, first_metric_only=first_metric_only)],
            eval_train_metric=eval_train_metric,
        )
        assert assumed_iteration == len(ret[list(ret.keys())[0]])

    X, y = make_synthetic_regression()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    X_test1, X_test2, y_test1, y_test2 = train_test_split(X_test, y_test, test_size=0.5, random_state=73)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_valid1 = lgb.Dataset(X_test1, y_test1, reference=lgb_train)
    lgb_valid2 = lgb.Dataset(X_test2, y_test2, reference=lgb_train)

    iter_valid1_l1 = 3
    iter_valid1_l2 = 3
    iter_valid2_l1 = 3
    iter_valid2_l2 = 15
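    # sanity check: the metrics must disagree on the best iteration somewhere,
    # otherwise first_metric_only could not change the stopping behavior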
    assert len({iter_valid1_l1, iter_valid1_l2, iter_valid2_l1, iter_valid2_l2}) == 2
    iter_min_l1 = min([iter_valid1_l1, iter_valid2_l1])
    iter_min_l2 = min([iter_valid1_l2, iter_valid2_l2])
    iter_min_valid1 = min([iter_valid1_l1, iter_valid1_l2])

    iter_cv_l1 = 15
    iter_cv_l2 = 13
    assert len({iter_cv_l1, iter_cv_l2}) == 2
    iter_cv_min = min([iter_cv_l1, iter_cv_l2])

    # test for lgb.train
    metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, False)
    metrics_combination_train_regression(lgb_valid1, [], iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, False)
    metrics_combination_train_regression(lgb_valid1, None, iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, "l2", iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, "l1", iter_valid1_l1, True)
    metrics_combination_train_regression(lgb_valid1, ["l2", "l1"], iter_valid1_l2, True)
    metrics_combination_train_regression(lgb_valid1, ["l1", "l2"], iter_valid1_l1, True)
    metrics_combination_train_regression(lgb_valid1, ["l2", "l1"], iter_min_valid1, False)
    metrics_combination_train_regression(lgb_valid1, ["l1", "l2"], iter_min_valid1, False)

    # test feval for lgb.train
    metrics_combination_train_regression(
        lgb_valid1,
        "None",
        1,
        False,
        feval=lambda preds, train_data: [decreasing_metric(preds, train_data), constant_metric(preds, train_data)],
    )
    metrics_combination_train_regression(
        lgb_valid1,
        "None",
        25,
        True,
        feval=lambda preds, train_data: [decreasing_metric(preds, train_data), constant_metric(preds, train_data)],
    )
    metrics_combination_train_regression(
        lgb_valid1,
        "None",
        1,
        True,
        feval=lambda preds, train_data: [constant_metric(preds, train_data), decreasing_metric(preds, train_data)],
    )

    # test with two valid data for lgb.train
    metrics_combination_train_regression([lgb_valid1, lgb_valid2], ["l2", "l1"], iter_min_l2, True)
    metrics_combination_train_regression([lgb_valid2, lgb_valid1], ["l2", "l1"], iter_min_l2, True)
    metrics_combination_train_regression([lgb_valid1, lgb_valid2], ["l1", "l2"], iter_min_l1, True)
    metrics_combination_train_regression([lgb_valid2, lgb_valid1], ["l1", "l2"], iter_min_l1, True)

    # test for lgb.cv
    metrics_combination_cv_regression(None, iter_cv_l2, True, False)
    metrics_combination_cv_regression("l2", iter_cv_l2, True, False)
    metrics_combination_cv_regression("l1", iter_cv_l1, True, False)
    metrics_combination_cv_regression(["l2", "l1"], iter_cv_l2, True, False)
    metrics_combination_cv_regression(["l1", "l2"], iter_cv_l1, True, False)
    metrics_combination_cv_regression(["l2", "l1"], iter_cv_min, False, False)
    metrics_combination_cv_regression(["l1", "l2"], iter_cv_min, False, False)
    metrics_combination_cv_regression(None, iter_cv_l2, True, True)
    metrics_combination_cv_regression("l2", iter_cv_l2, True, True)
    metrics_combination_cv_regression("l1", iter_cv_l1, True, True)
    metrics_combination_cv_regression(["l2", "l1"], iter_cv_l2, True, True)
    metrics_combination_cv_regression(["l1", "l2"], iter_cv_l1, True, True)
    metrics_combination_cv_regression(["l2", "l1"], iter_cv_min, False, True)
    metrics_combination_cv_regression(["l1", "l2"], iter_cv_min, False, True)

    # test feval for lgb.cv
    metrics_combination_cv_regression(
        "None",
        1,
        False,
        False,
        feval=lambda preds, train_data: [decreasing_metric(preds, train_data), constant_metric(preds, train_data)],
    )
    metrics_combination_cv_regression(
        "None",
        25,
        True,
        False,
        feval=lambda preds, train_data: [decreasing_metric(preds, train_data), constant_metric(preds, train_data)],
    )
    metrics_combination_cv_regression(
        "None",
        1,
        True,
        False,
        feval=lambda preds, train_data: [constant_metric(preds, train_data), decreasing_metric(preds, train_data)],
    )


def test_node_level_subcol():
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        "objective": "binary",
        "metric": "binary_logloss",
        "feature_fraction_bynode": 0.8,
        "feature_fraction": 1.0,
        "verbose": -1,
    }
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    evals_result = {}
    gbm = lgb.train(
        params, lgb_train, num_boost_round=25, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    ret = log_loss(y_test, gbm.predict(X_test))
    assert ret < 0.14
    assert evals_result["valid_0"]["binary_logloss"][-1] == pytest.approx(ret)
    params["feature_fraction"] = 0.5
    gbm2 = lgb.train(params, lgb_train, num_boost_round=25)
    ret2 = log_loss(y_test, gbm2.predict(X_test))
    assert ret != ret2


def test_forced_split_feature_indices(tmp_path):
    X, y = make_synthetic_regression()
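    # the nested "left" split references feature index X.shape[1], one past
    # the last valid index, so training must reject the forced-splits file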
    forced_split = {
        "feature": 0,
        "threshold": 0.5,
        "left": {"feature": X.shape[1], "threshold": 0.5},
    }
    tmp_split_file = tmp_path / "forced_split.json"
    with open(tmp_split_file, "w") as f:
        f.write(json.dumps(forced_split))
    lgb_train = lgb.Dataset(X, y)
    params = {"objective": "regression", "forcedsplits_filename": tmp_split_file}
    with pytest.raises(lgb.basic.LightGBMError, match="Forced splits file includes feature index"):
        lgb.train(params, lgb_train)


def test_forced_bins():
    x = np.empty((100, 2))
    x[:, 0] = np.arange(0, 1, 0.01)
    x[:, 1] = -np.arange(0, 1, 0.01)
    y = np.arange(0, 1, 0.01)
    forcedbins_filename = Path(__file__).absolute().parents[2] / "examples" / "regression" / "forced_bins.json"
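    # the example forced_bins.json pins bin upper bounds, so the feature 0
    # values probed below fall into distinct bins while the feature 1 values
    # share a single bin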
    params = {
        "objective": "regression_l1",
        "max_bin": 5,
        "forcedbins_filename": forcedbins_filename,
        "num_leaves": 2,
        "min_data_in_leaf": 1,
        "verbose": -1,
    }
    lgb_x = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_x, num_boost_round=20)
    new_x = np.zeros((3, x.shape[1]))
    new_x[:, 0] = [0.31, 0.37, 0.41]
    predicted = est.predict(new_x)
    assert len(np.unique(predicted)) == 3
    new_x[:, 0] = [0, 0, 0]
    new_x[:, 1] = [-0.9, -0.6, -0.3]
    predicted = est.predict(new_x)
    assert len(np.unique(predicted)) == 1
    params["forcedbins_filename"] = ""
    lgb_x = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_x, num_boost_round=20)
    predicted = est.predict(new_x)
    assert len(np.unique(predicted)) == 3
    params["forcedbins_filename"] = (
        Path(__file__).absolute().parents[2] / "examples" / "regression" / "forced_bins2.json"
    )
    params["max_bin"] = 11
    lgb_x = lgb.Dataset(x[:, :1], label=y)
    est = lgb.train(params, lgb_x, num_boost_round=50)
    predicted = est.predict(x[1:, :1])
    _, counts = np.unique(predicted, return_counts=True)
    assert min(counts) >= 9
    assert max(counts) <= 11


def test_binning_same_sign():
    # test that binning works properly for features with only positive or only negative values
    x = np.empty((99, 2))
    x[:, 0] = np.arange(0.01, 1, 0.01)
    x[:, 1] = -np.arange(0.01, 1, 0.01)
    y = np.arange(0.01, 1, 0.01)
    params = {
        "objective": "regression_l1",
        "max_bin": 5,
        "num_leaves": 2,
        "min_data_in_leaf": 1,
        "verbose": -1,
        "seed": 0,
    }
    lgb_x = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_x, num_boost_round=20)
    new_x = np.zeros((3, 2))
    new_x[:, 0] = [-1, 0, 1]
    predicted = est.predict(new_x)
    assert predicted[0] == pytest.approx(predicted[1])
    assert predicted[1] != pytest.approx(predicted[2])
    new_x = np.zeros((3, 2))
    new_x[:, 1] = [-1, 0, 1]
    predicted = est.predict(new_x)
    assert predicted[0] != pytest.approx(predicted[1])
    assert predicted[1] == pytest.approx(predicted[2])


def test_dataset_update_params(rng):
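    # Dataset-construction params: `default_params` builds the Dataset, while
    # `unchangeable_params` holds altered values that must be rejected once
    # the Dataset has been constructed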
    default_params = {
        "max_bin": 100,
        "max_bin_by_feature": [20, 10],
        "bin_construct_sample_cnt": 10000,
        "min_data_in_bin": 1,
        "use_missing": False,
        "zero_as_missing": False,
        "categorical_feature": [0],
        "feature_pre_filter": True,
        "pre_partition": False,
        "enable_bundle": True,
        "data_random_seed": 0,
        "is_enable_sparse": True,
        "header": True,
        "two_round": True,
        "label_column": 0,
        "weight_column": 0,
        "group_column": 0,
        "ignore_column": 0,
        "min_data_in_leaf": 10,
        "linear_tree": False,
        "precise_float_parser": True,
        "verbose": -1,
    }
    unchangeable_params = {
        "max_bin": 150,
        "max_bin_by_feature": [30, 5],
        "bin_construct_sample_cnt": 5000,
        "min_data_in_bin": 2,
        "use_missing": True,
        "zero_as_missing": True,
        "categorical_feature": [0, 1],
        "feature_pre_filter": False,
        "pre_partition": True,
        "enable_bundle": False,
        "data_random_seed": 1,
        "is_enable_sparse": False,
        "header": False,
        "two_round": False,
        "label_column": 1,
        "weight_column": 1,
        "group_column": 1,
        "ignore_column": 1,
        "forcedbins_filename": "/some/path/forcedbins.json",
        "min_data_in_leaf": 2,
        "linear_tree": True,
        "precise_float_parser": False,
    }
    X = rng.uniform(size=(100, 2))
    y = rng.uniform(size=(100,))

    # decreasing without freeing raw data is allowed
    lgb_data = lgb.Dataset(X, y, params=default_params, free_raw_data=False).construct()
    default_params["min_data_in_leaf"] -= 1
    lgb.train(default_params, lgb_data, num_boost_round=3)

    # decreasing before lazy init is allowed
    lgb_data = lgb.Dataset(X, y, params=default_params)
    default_params["min_data_in_leaf"] -= 1
    lgb.train(default_params, lgb_data, num_boost_round=3)

    # increasing is allowed
    default_params["min_data_in_leaf"] += 2
    lgb.train(default_params, lgb_data, num_boost_round=3)

    # decreasing with disabled filter is allowed
    default_params["feature_pre_filter"] = False
    lgb_data = lgb.Dataset(X, y, params=default_params).construct()
    default_params["min_data_in_leaf"] -= 4
    lgb.train(default_params, lgb_data, num_boost_round=3)

    # decreasing with enabled filter is disallowed;
    # also changes of other params are disallowed
    default_params["feature_pre_filter"] = True
    lgb_data = lgb.Dataset(X, y, params=default_params).construct()
    for key, value in unchangeable_params.items():
        new_params = default_params.copy()
        new_params[key] = value
        if key != "forcedbins_filename":
            param_name = key
        else:
            param_name = "forced bins"
        err_msg = (
            "Reducing `min_data_in_leaf` with `feature_pre_filter=true` may cause *"
            if key == "min_data_in_leaf"
            else f"Cannot change {param_name} *"
        )
        with np.testing.assert_raises_regex(lgb.basic.LightGBMError, err_msg):
            lgb.train(new_params, lgb_data, num_boost_round=3)


def test_dataset_params_with_reference(rng):
    default_params = {"max_bin": 100}
    X = rng.uniform(size=(100, 2))
    y = rng.uniform(size=(100,))
    X_val = rng.uniform(size=(100, 2))
    y_val = rng.uniform(size=(100,))
    lgb_train = lgb.Dataset(X, y, params=default_params, free_raw_data=False).construct()
    lgb_val = lgb.Dataset(X_val, y_val, reference=lgb_train, free_raw_data=False).construct()
    assert lgb_train.get_params() == default_params
    assert lgb_val.get_params() == default_params
    lgb.train(default_params, lgb_train, valid_sets=[lgb_val])


def test_extra_trees():
    # check extra trees increases regularization
    X, y = make_synthetic_regression()
    lgb_x = lgb.Dataset(X, label=y)
    params = {"objective": "regression", "num_leaves": 32, "verbose": -1, "extra_trees": False, "seed": 0}
    est = lgb.train(params, lgb_x, num_boost_round=10)
    predicted = est.predict(X)
    err = mean_squared_error(y, predicted)
    params["extra_trees"] = True
    est = lgb.train(params, lgb_x, num_boost_round=10)
    predicted_new = est.predict(X)
    err_new = mean_squared_error(y, predicted_new)
    assert err < err_new


def test_path_smoothing():
    # check path smoothing increases regularization
    X, y = make_synthetic_regression()
    lgb_x = lgb.Dataset(X, label=y)
    params = {"objective": "regression", "num_leaves": 32, "verbose": -1, "seed": 0}
    est = lgb.train(params, lgb_x, num_boost_round=10)
    predicted = est.predict(X)
    err = mean_squared_error(y, predicted)
    params["path_smooth"] = 1
    est = lgb.train(params, lgb_x, num_boost_round=10)
    predicted_new = est.predict(X)
    err_new = mean_squared_error(y, predicted_new)
    assert err < err_new


def test_trees_to_dataframe(rng):
    pytest.importorskip("pandas")

    def _imptcs_to_numpy(X, impcts_dict):
        cols = [f"Column_{i}" for i in range(X.shape[1])]
        return [impcts_dict.get(col, 0.0) for col in cols]

    X, y = load_breast_cancer(return_X_y=True)
    data = lgb.Dataset(X, label=y)
    num_trees = 10
    bst = lgb.train({"objective": "binary", "verbose": -1}, data, num_trees)
    tree_df = bst.trees_to_dataframe()
    split_dict = tree_df[~tree_df["split_gain"].isnull()].groupby("split_feature").size().to_dict()

    gains_dict = tree_df.groupby("split_feature")["split_gain"].sum().to_dict()

    tree_split = _imptcs_to_numpy(X, split_dict)
    tree_gains = _imptcs_to_numpy(X, gains_dict)
    mod_split = bst.feature_importance("split")
    mod_gains = bst.feature_importance("gain")
    num_trees_from_df = tree_df["tree_index"].nunique()
    obs_counts_from_df = tree_df.loc[tree_df["node_depth"] == 1, "count"].values

    np.testing.assert_equal(tree_split, mod_split)
    np.testing.assert_allclose(tree_gains, mod_gains)
    assert num_trees_from_df == num_trees
    np.testing.assert_equal(obs_counts_from_df, len(y))

    # test edge case with one leaf
    X = np.ones((10, 2))
    y = rng.uniform(size=(10,))
    data = lgb.Dataset(X, label=y)
    bst = lgb.train({"objective": "binary", "verbose": -1}, data, num_trees)
    tree_df = bst.trees_to_dataframe()

    assert len(tree_df) == 1
    assert tree_df.loc[0, "tree_index"] == 0
    assert tree_df.loc[0, "node_depth"] == 1
    assert tree_df.loc[0, "node_index"] == "0-L0"
    assert tree_df.loc[0, "value"] is not None
    for col in (
        "left_child",
        "right_child",
        "parent_index",
        "split_feature",
        "split_gain",
        "threshold",
        "decision_type",
        "missing_direction",
        "missing_type",
        "weight",
        "count",
    ):
        assert tree_df.loc[0, col] is None


def test_interaction_constraints():
    X, y = make_synthetic_regression(n_samples=200)
    num_features = X.shape[1]
    train_data = lgb.Dataset(X, label=y)
    # check that constraint containing all features is equivalent to no constraint
    params = {"verbose": -1, "seed": 0}
    est = lgb.train(params, train_data, num_boost_round=10)
    pred1 = est.predict(X)
    est = lgb.train(dict(params, interaction_constraints=[list(range(num_features))]), train_data, num_boost_round=10)
    pred2 = est.predict(X)
    np.testing.assert_allclose(pred1, pred2)
    # check that constraint partitioning the features reduces train accuracy
    est = lgb.train(dict(params, interaction_constraints=[[0, 2], [1, 3]]), train_data, num_boost_round=10)
    pred3 = est.predict(X)
    assert mean_squared_error(y, pred1) < mean_squared_error(y, pred3)
    # check that constraints consisting of single features reduce accuracy further
    est = lgb.train(
        dict(params, interaction_constraints=[[i] for i in range(num_features)]), train_data, num_boost_round=10
    )
    pred4 = est.predict(X)
    assert mean_squared_error(y, pred3) < mean_squared_error(y, pred4)
    # test that interaction constraints work when not all features are used
    X = np.concatenate([np.zeros((X.shape[0], 1)), X], axis=1)
    num_features = X.shape[1]
    train_data = lgb.Dataset(X, label=y)
    est = lgb.train(
        dict(params, interaction_constraints=[[0] + list(range(2, num_features)), [1] + list(range(2, num_features))]),
        train_data,
        num_boost_round=10,
    )


def test_linear_trees_num_threads(rng_fixed_seed):
    # check that number of threads does not affect result
    x = np.arange(0, 1000, 0.1)
    y = 2 * x + rng_fixed_seed.normal(loc=0, scale=0.1, size=(len(x),))
    x = x[:, np.newaxis]
    lgb_train = lgb.Dataset(x, label=y)
    params = {"verbose": -1, "objective": "regression", "seed": 0, "linear_tree": True, "num_threads": 2}
    est = lgb.train(params, lgb_train, num_boost_round=100)
    pred1 = est.predict(x)
    params["num_threads"] = 4
    est = lgb.train(params, lgb_train, num_boost_round=100)
    pred2 = est.predict(x)
    np.testing.assert_allclose(pred1, pred2)


def test_linear_trees(tmp_path, rng_fixed_seed):
    # check that setting linear_tree=True fits better than ordinary trees when data has linear relationship
    x = np.arange(0, 100, 0.1)
    y = 2 * x + rng_fixed_seed.normal(0, 0.1, len(x))
    x = x[:, np.newaxis]
    lgb_train = lgb.Dataset(x, label=y)
    params = {"verbose": -1, "metric": "mse", "seed": 0, "num_leaves": 2}
    est = lgb.train(params, lgb_train, num_boost_round=10)
    pred1 = est.predict(x)
    lgb_train = lgb.Dataset(x, label=y)
    res = {}
    est = lgb.train(
        dict(params, linear_tree=True),
        lgb_train,
        num_boost_round=10,
        valid_sets=[lgb_train],
        valid_names=["train"],
        callbacks=[lgb.record_evaluation(res)],
    )
    pred2 = est.predict(x)
    assert res["train"]["l2"][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1)
    assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1)
    # test again with nans in data
    x[:10] = np.nan
    lgb_train = lgb.Dataset(x, label=y)
    est = lgb.train(params, lgb_train, num_boost_round=10)
    pred1 = est.predict(x)
    lgb_train = lgb.Dataset(x, label=y)
    res = {}
    est = lgb.train(
        dict(params, linear_tree=True),
        lgb_train,
        num_boost_round=10,
        valid_sets=[lgb_train],
        valid_names=["train"],
        callbacks=[lgb.record_evaluation(res)],
    )
    pred2 = est.predict(x)
    assert res["train"]["l2"][-1] == pytest.approx(mean_squared_error(y, pred2), abs=1e-1)
    assert mean_squared_error(y, pred2) < mean_squared_error(y, pred1)
    # test again with bagging
    res = {}
    est = lgb.train(
        dict(params, linear_tree=True, subsample=0.8, bagging_freq=1),
        lgb_train,
        num_boost_round=10,
        valid_sets=[lgb_train],
        valid_names=["train"],
        callbacks=[lgb.record_evaluation(res)],
    )
    pred = est.predict(x)
    assert res["train"]["l2"][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1)
    # test with a feature that has only one non-nan value
    x = np.concatenate([np.ones([x.shape[0], 1]), x], 1)
    x[500:, 1] = np.nan
    y[500:] += 10
    lgb_train = lgb.Dataset(x, label=y)
    res = {}
    est = lgb.train(
        dict(params, linear_tree=True, subsample=0.8, bagging_freq=1),
        lgb_train,
        num_boost_round=10,
        valid_sets=[lgb_train],
        valid_names=["train"],
        callbacks=[lgb.record_evaluation(res)],
    )
    pred = est.predict(x)
    assert res["train"]["l2"][-1] == pytest.approx(mean_squared_error(y, pred), abs=1e-1)
    # test with a categorical feature
    x[:250, 0] = 0
    y[:250] += 10
    lgb_train = lgb.Dataset(x, label=y, categorical_feature=[0])
    est = lgb.train(
        dict(params, linear_tree=True, subsample=0.8, bagging_freq=1),
        lgb_train,
        num_boost_round=10,
    )
    # test refit: same results on same data
    est2 = est.refit(x, label=y)
    p1 = est.predict(x)
    p2 = est2.predict(x)
    assert np.mean(np.abs(p1 - p2)) < 2

    # test refit with save and load
    temp_model = str(tmp_path / "temp_model.txt")
    est.save_model(temp_model)
    est2 = lgb.Booster(model_file=temp_model)
    est2 = est2.refit(x, label=y)
    p1 = est.predict(x)
    p2 = est2.predict(x)
    assert np.mean(np.abs(p1 - p2)) < 2
    # test refit: different results training on different data
    est3 = est.refit(x[:100, :], label=y[:100])
    p3 = est3.predict(x)
    assert np.mean(np.abs(p2 - p1)) < np.abs(np.max(p3 - p1))
    # test when num_leaves - 1 < num_features and when num_leaves - 1 > num_features
    X_train, _, y_train, _ = train_test_split(*load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2)
    params = {"linear_tree": True, "verbose": -1, "metric": "mse", "seed": 0}
    train_data = lgb.Dataset(
        X_train,
        label=y_train,
        params=dict(params, num_leaves=2),
        categorical_feature=[0],
    )
    est = lgb.train(params, train_data, num_boost_round=10)
    train_data = lgb.Dataset(
        X_train,
        label=y_train,
        params=dict(params, num_leaves=60),
        categorical_feature=[0],
    )
    est = lgb.train(params, train_data, num_boost_round=10)
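

# Illustrative sketch only (not used by the tests above): one way to read the
# per-leaf linear models trained with ``linear_tree=True`` back out of
# ``dump_model()``. The helper name ``_collect_linear_leaves`` is hypothetical;
# the "leaf_features" / "leaf_coeff" / "leaf_const" keys are the ones asserted
# on in ``test_dump_model_linear`` further down.
def _collect_linear_leaves(booster):
    leaves = []

    def _walk(node):
        if "leaf_value" in node:
            leaves.append(
                {
                    "features": node.get("leaf_features"),
                    "coeff": node.get("leaf_coeff"),
                    "const": node.get("leaf_const"),
                }
            )
        else:
            _walk(node["left_child"])
            _walk(node["right_child"])

    for tree in booster.dump_model()["tree_info"]:
        _walk(tree["tree_structure"])
    return leaves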


def test_save_and_load_linear(tmp_path):
    X_train, X_test, y_train, y_test = train_test_split(
        *load_breast_cancer(return_X_y=True), test_size=0.1, random_state=2
    )
    X_train = np.concatenate([np.ones((X_train.shape[0], 1)), X_train], 1)
    X_train[: X_train.shape[0] // 2, 0] = 0
    y_train[: X_train.shape[0] // 2] = 1
    params = {"linear_tree": True}
    train_data_1 = lgb.Dataset(X_train, label=y_train, params=params, categorical_feature=[0])
    est_1 = lgb.train(params, train_data_1, num_boost_round=10)
    pred_1 = est_1.predict(X_train)

    tmp_dataset = str(tmp_path / "temp_dataset.bin")
    train_data_1.save_binary(tmp_dataset)
    train_data_2 = lgb.Dataset(tmp_dataset)
    est_2 = lgb.train(params, train_data_2, num_boost_round=10)
    pred_2 = est_2.predict(X_train)
    np.testing.assert_allclose(pred_1, pred_2)

    model_file = str(tmp_path / "model.txt")
    est_2.save_model(model_file)
    est_3 = lgb.Booster(model_file=model_file)
    pred_3 = est_3.predict(X_train)
    np.testing.assert_allclose(pred_2, pred_3)
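

# Illustrative sketch only: the same round trip can be done in memory, without
# a temporary file, via ``model_to_string`` and the ``model_str`` constructor
# argument. ``_roundtrip_booster_in_memory`` is a hypothetical helper, not used
# by the tests.
def _roundtrip_booster_in_memory(booster):
    return lgb.Booster(model_str=booster.model_to_string())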


def test_linear_single_leaf():
    X_train, y_train = load_breast_cancer(return_X_y=True)
    train_data = lgb.Dataset(X_train, label=y_train)
    params = {"objective": "binary", "linear_tree": True, "min_sum_hessian": 5000}
    bst = lgb.train(params, train_data, num_boost_round=5)
    y_pred = bst.predict(X_train)
    assert log_loss(y_train, y_pred) < 0.661


def test_linear_raises_informative_errors_on_unsupported_params():
    X, y = make_synthetic_regression()
    with pytest.raises(lgb.basic.LightGBMError, match="Cannot use regression_l1 objective when fitting linear trees"):
        lgb.train(
            train_set=lgb.Dataset(X, label=y),
            params={"linear_tree": True, "objective": "regression_l1"},
            num_boost_round=1,
        )
    with pytest.raises(lgb.basic.LightGBMError, match="zero_as_missing must be false when fitting linear trees"):
        lgb.train(
            train_set=lgb.Dataset(X, label=y),
            params={"linear_tree": True, "zero_as_missing": True},
            num_boost_round=1,
        )


def test_predict_with_start_iteration():
    def inner_test(X, y, params, early_stopping_rounds):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        train_data = lgb.Dataset(X_train, label=y_train)
        valid_data = lgb.Dataset(X_test, label=y_test)
        callbacks = [lgb.early_stopping(early_stopping_rounds)] if early_stopping_rounds is not None else []
        booster = lgb.train(params, train_data, num_boost_round=50, valid_sets=[valid_data], callbacks=callbacks)

        # test that a single predict call over all iterations equals the sum of
        # chunked predictions made with start_iteration and num_iteration
        all_pred = booster.predict(X, raw_score=True)
        all_pred_contrib = booster.predict(X, pred_contrib=True)
        steps = [10, 12]
        for step in steps:
            pred = np.zeros_like(all_pred)
            pred_contrib = np.zeros_like(all_pred_contrib)
            for start_iter in range(0, 50, step):
                pred += booster.predict(X, start_iteration=start_iter, num_iteration=step, raw_score=True)
                pred_contrib += booster.predict(X, start_iteration=start_iter, num_iteration=step, pred_contrib=True)
            np.testing.assert_allclose(all_pred, pred)
            np.testing.assert_allclose(all_pred_contrib, pred_contrib)
        # test the case where start_iteration <= 0, and num_iteration is None
        pred1 = booster.predict(X, start_iteration=-1)
        pred2 = booster.predict(X, num_iteration=booster.best_iteration)
        np.testing.assert_allclose(pred1, pred2)

        # test the case where start_iteration > 0, and num_iteration <= 0
        pred4 = booster.predict(X, start_iteration=10, num_iteration=-1)
        pred5 = booster.predict(X, start_iteration=10, num_iteration=90)
        pred6 = booster.predict(X, start_iteration=10, num_iteration=0)
        np.testing.assert_allclose(pred4, pred5)
        np.testing.assert_allclose(pred4, pred6)

        # test the case where start_iteration > 0, and num_iteration <= 0, with pred_leaf=True
        pred4 = booster.predict(X, start_iteration=10, num_iteration=-1, pred_leaf=True)
        pred5 = booster.predict(X, start_iteration=10, num_iteration=40, pred_leaf=True)
        pred6 = booster.predict(X, start_iteration=10, num_iteration=0, pred_leaf=True)
        np.testing.assert_allclose(pred4, pred5)
        np.testing.assert_allclose(pred4, pred6)

        # test the case where start_iteration > 0, and num_iteration <= 0, with pred_contrib=True
        pred4 = booster.predict(X, start_iteration=10, num_iteration=-1, pred_contrib=True)
        pred5 = booster.predict(X, start_iteration=10, num_iteration=40, pred_contrib=True)
        pred6 = booster.predict(X, start_iteration=10, num_iteration=0, pred_contrib=True)
        np.testing.assert_allclose(pred4, pred5)
        np.testing.assert_allclose(pred4, pred6)

    # test for regression
    X, y = make_synthetic_regression()
    params = {"objective": "regression", "verbose": -1, "metric": "l2", "learning_rate": 0.5}
    # test both with and without early stopping
    inner_test(X, y, params, early_stopping_rounds=1)
    inner_test(X, y, params, early_stopping_rounds=5)
    inner_test(X, y, params, early_stopping_rounds=None)

    # test for multi-class
    X, y = load_iris(return_X_y=True)
    params = {"objective": "multiclass", "num_class": 3, "verbose": -1, "metric": "multi_error"}
    # test both with and without early stopping
    inner_test(X, y, params, early_stopping_rounds=1)
    inner_test(X, y, params, early_stopping_rounds=5)
    inner_test(X, y, params, early_stopping_rounds=None)

    # test for binary
    X, y = load_breast_cancer(return_X_y=True)
    params = {"objective": "binary", "verbose": -1, "metric": "auc"}
    # test both with and without early stopping
    inner_test(X, y, params, early_stopping_rounds=1)
    inner_test(X, y, params, early_stopping_rounds=5)
    inner_test(X, y, params, early_stopping_rounds=None)
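

# Minimal sketch of the chunked-prediction identity exercised in
# ``test_predict_with_start_iteration``: raw scores are additive across
# disjoint iteration ranges, so predicting [0, split_at) and [split_at, end)
# separately and summing matches a single predict over all iterations.
# ``_chunked_raw_predict`` is a hypothetical helper; num_iteration=0 means
# "through the last iteration", as asserted above.
def _chunked_raw_predict(booster, X, split_at):
    head = booster.predict(X, start_iteration=0, num_iteration=split_at, raw_score=True)
    tail = booster.predict(X, start_iteration=split_at, num_iteration=0, raw_score=True)
    return head + tail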


@pytest.mark.parametrize("use_init_score", [False, True])
def test_predict_stump(rng, use_init_score):
    X, y = load_breast_cancer(return_X_y=True)
    dataset_kwargs = {"data": X, "label": y}
    if use_init_score:
        dataset_kwargs.update({"init_score": rng.uniform(size=y.shape)})
    bst = lgb.train(
        train_set=lgb.Dataset(**dataset_kwargs),
        params={"objective": "binary", "min_data_in_leaf": X.shape[0]},
        num_boost_round=5,
    )
    # checking prediction from 1 iteration and the whole model, to prevent bugs
    # of the form "a model of n stumps predicts n * initial_score"
    preds_1 = bst.predict(X, raw_score=True, num_iteration=1)
    preds_all = bst.predict(X, raw_score=True)
    if use_init_score:
        # if init_score was provided, a model of stumps should predict all 0s
        all_zeroes = np.full_like(preds_1, fill_value=0.0)
        np.testing.assert_allclose(preds_1, all_zeroes)
        np.testing.assert_allclose(preds_all, all_zeroes)
    else:
        # if init_score was not provided, prediction for a model of stumps should be
        # the "average" of the labels
        y_avg = np.log(y.mean() / (1.0 - y.mean()))
        np.testing.assert_allclose(preds_1, np.full_like(preds_1, fill_value=y_avg))
        np.testing.assert_allclose(preds_all, np.full_like(preds_all, fill_value=y_avg))
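

# Worked example of the "average of the labels" claim in ``test_predict_stump``,
# assuming a binary objective and no init_score: with y.mean() == 0.6 the
# constant raw score is log(0.6 / 0.4), about 0.405, i.e. the log-odds of the
# base rate. ``_binary_base_rate_raw_score`` is illustrative only.
def _binary_base_rate_raw_score(y):
    p = y.mean()
    return np.log(p / (1.0 - p))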


def test_predict_regression_output_shape():
    n_samples = 1_000
    n_features = 4
    X, y = make_synthetic_regression(n_samples=n_samples, n_features=n_features)
    dtrain = lgb.Dataset(X, label=y)
    params = {"objective": "regression", "verbosity": -1}

    # 1-round model
    bst = lgb.train(params, dtrain, num_boost_round=1)
    assert bst.predict(X).shape == (n_samples,)
    assert bst.predict(X, raw_score=True).shape == (n_samples,)
    assert bst.predict(X, pred_contrib=True).shape == (n_samples, n_features + 1)
    assert bst.predict(X, pred_leaf=True).shape == (n_samples, 1)

    # 2-round model
    bst = lgb.train(params, dtrain, num_boost_round=2)
    assert bst.predict(X).shape == (n_samples,)
    assert bst.predict(X, raw_score=True).shape == (n_samples,)
    assert bst.predict(X, pred_contrib=True).shape == (n_samples, n_features + 1)
    assert bst.predict(X, pred_leaf=True).shape == (n_samples, 2)


def test_predict_binary_classification_output_shape():
    n_samples = 1_000
    n_features = 4
    X, y = make_classification(n_samples=n_samples, n_features=n_features, n_classes=2)
    dtrain = lgb.Dataset(X, label=y)
    params = {"objective": "binary", "verbosity": -1}

    # 1-round model
    bst = lgb.train(params, dtrain, num_boost_round=1)
    assert bst.predict(X).shape == (n_samples,)
    assert bst.predict(X, raw_score=True).shape == (n_samples,)
    assert bst.predict(X, pred_contrib=True).shape == (n_samples, n_features + 1)
    assert bst.predict(X, pred_leaf=True).shape == (n_samples, 1)

    # 2-round model
    bst = lgb.train(params, dtrain, num_boost_round=2)
    assert bst.predict(X).shape == (n_samples,)
    assert bst.predict(X, raw_score=True).shape == (n_samples,)
    assert bst.predict(X, pred_contrib=True).shape == (n_samples, n_features + 1)
    assert bst.predict(X, pred_leaf=True).shape == (n_samples, 2)


def test_predict_multiclass_classification_output_shape():
    n_samples = 1_000
    n_features = 10
    n_classes = 3
    X, y = make_classification(n_samples=n_samples, n_features=n_features, n_classes=n_classes, n_informative=6)
    dtrain = lgb.Dataset(X, label=y)
    params = {"objective": "multiclass", "verbosity": -1, "num_class": n_classes}

    # 1-round model
    bst = lgb.train(params, dtrain, num_boost_round=1)
    assert bst.predict(X).shape == (n_samples, n_classes)
    assert bst.predict(X, raw_score=True).shape == (n_samples, n_classes)
    assert bst.predict(X, pred_contrib=True).shape == (n_samples, n_classes * (n_features + 1))
    assert bst.predict(X, pred_leaf=True).shape == (n_samples, n_classes)

    # 2-round model
    bst = lgb.train(params, dtrain, num_boost_round=2)
    assert bst.predict(X).shape == (n_samples, n_classes)
    assert bst.predict(X, raw_score=True).shape == (n_samples, n_classes)
    assert bst.predict(X, pred_contrib=True).shape == (n_samples, n_classes * (n_features + 1))
    assert bst.predict(X, pred_leaf=True).shape == (n_samples, n_classes * 2)
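

# Sketch of the relationship behind the pred_contrib shapes checked above: for
# a single-output model (regression or binary), each row holds one contribution
# per feature plus a bias term in the last column, and the row sum reproduces
# the raw score. ``_contribs_sum_to_raw_score`` is an illustrative helper, not
# one of the tests.
def _contribs_sum_to_raw_score(booster, X):
    contribs = booster.predict(X, pred_contrib=True)
    raw = booster.predict(X, raw_score=True)
    return np.allclose(contribs.sum(axis=1), raw)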


def test_average_precision_metric():
    # test against sklearn average precision metric
    X, y = load_breast_cancer(return_X_y=True)
    params = {"objective": "binary", "metric": "average_precision", "verbose": -1}
    res = {}
    lgb_X = lgb.Dataset(X, label=y)
    est = lgb.train(params, lgb_X, num_boost_round=10, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(res)])
    ap = res["training"]["average_precision"][-1]
    pred = est.predict(X)
    sklearn_ap = average_precision_score(y, pred)
    assert ap == pytest.approx(sklearn_ap)
    # test that average precision is 1 when the model predicts perfectly
    y = y.copy()
    y[:] = 1
    lgb_X = lgb.Dataset(X, label=y)
    lgb.train(params, lgb_X, num_boost_round=1, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(res)])
    assert res["training"]["average_precision"][-1] == pytest.approx(1)


def test_r2_metric():
    # test against sklearn R2 metric
    X, y = make_synthetic_regression()
    params = {"objective": "regression", "metric": "r2", "verbose": -1}
    res = {}
    train_data = lgb.Dataset(X, label=y)
    est = lgb.train(
        params, train_data, num_boost_round=1, valid_sets=[train_data], callbacks=[lgb.record_evaluation(res)]
    )
    r2 = res["training"]["r2"][-1]
    pred = est.predict(X)
    sklearn_r2 = r2_score(y, pred)
    assert r2 == pytest.approx(sklearn_r2)
    assert r2 != 0
    assert r2 != 1
    # test that R2 is 1 when y has no variance and the model predicts perfectly
    y = y.copy()
    y[:] = 1
    lgb_X = lgb.Dataset(X, label=y)
    lgb.train(params, lgb_X, num_boost_round=1, valid_sets=[lgb_X], callbacks=[lgb.record_evaluation(res)])
    assert res["training"]["r2"][-1] == pytest.approx(1)


def test_reset_params_works_with_metric_num_class_and_boosting():
    X, y = load_breast_cancer(return_X_y=True)
    dataset_params = {"max_bin": 150}
    booster_params = {
        "objective": "multiclass",
        "max_depth": 4,
        "bagging_fraction": 0.8,
        "metric": ["multi_logloss", "multi_error"],
        "boosting": "gbdt",
        "num_class": 5,
    }
    dtrain = lgb.Dataset(X, y, params=dataset_params)
    bst = lgb.Booster(params=booster_params, train_set=dtrain)

    expected_params = dict(dataset_params, **booster_params)
    assert bst.params == expected_params

    booster_params["bagging_fraction"] += 0.1
    new_bst = bst.reset_parameter(booster_params)

    expected_params = dict(dataset_params, **booster_params)
    assert bst.params == expected_params
    assert new_bst.params == expected_params


@pytest.mark.parametrize("linear_tree", [False, True])
def test_dump_model_stump(linear_tree):
    X, y = load_breast_cancer(return_X_y=True)

    train_data = lgb.Dataset(X, label=y)
    params = {"objective": "binary", "verbose": -1, "linear_tree": linear_tree, "min_data_in_leaf": len(y)}
    bst = lgb.train(params, train_data, num_boost_round=5)
    dumped_model = bst.dump_model(num_iteration=5, start_iteration=0)
    tree_structure = dumped_model["tree_info"][0]["tree_structure"]
    assert len(dumped_model["tree_info"]) == 1
    assert "leaf_value" in tree_structure
    assert tree_structure["leaf_count"] == len(y)


def test_dump_model():
    initial_score_offset = 57.5
    X, y = make_synthetic_regression()
    train_data = lgb.Dataset(X, label=y + initial_score_offset)

    params = {
        "objective": "regression",
        "verbose": -1,
        "boost_from_average": True,
    }
    bst = lgb.train(params, train_data, num_boost_round=5)
    dumped_model = bst.dump_model(num_iteration=5, start_iteration=0)
    dumped_model_str = str(dumped_model)
    assert "leaf_features" not in dumped_model_str
    assert "leaf_coeff" not in dumped_model_str
    assert "leaf_const" not in dumped_model_str
    assert "leaf_value" in dumped_model_str
    assert "leaf_count" in dumped_model_str

    for tree in dumped_model["tree_info"]:
        assert tree["tree_structure"]["internal_value"] != 0

    assert dumped_model["tree_info"][0]["tree_structure"]["internal_value"] == pytest.approx(
        initial_score_offset, abs=1
    )
    assert_all_trees_valid(dumped_model)


def test_dump_model_linear():
    X, y = load_breast_cancer(return_X_y=True)
    params = {
        "objective": "binary",
        "verbose": -1,
        "linear_tree": True,
    }
    train_data = lgb.Dataset(X, label=y)
    bst = lgb.train(params, train_data, num_boost_round=5)
    dumped_model = bst.dump_model(num_iteration=5, start_iteration=0)
    assert_all_trees_valid(dumped_model)
    dumped_model_str = str(dumped_model)
    assert "leaf_features" in dumped_model_str
    assert "leaf_coeff" in dumped_model_str
    assert "leaf_const" in dumped_model_str
    assert "leaf_value" in dumped_model_str
    assert "leaf_count" in dumped_model_str


def test_dump_model_hook():
    def hook(obj):
        if "leaf_value" in obj:
            obj["LV"] = obj["leaf_value"]
            del obj["leaf_value"]
        return obj

    X, y = load_breast_cancer(return_X_y=True)
    train_data = lgb.Dataset(X, label=y)
    params = {"objective": "binary", "verbose": -1}
    bst = lgb.train(params, train_data, num_boost_round=5)
    dumped_model_str = str(bst.dump_model(5, 0, object_hook=hook))
    assert "leaf_value" not in dumped_model_str
    assert "LV" in dumped_model_str


@pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Forced splits are not yet supported by CUDA version")
def test_force_split_with_feature_fraction(tmp_path):
    X, y = make_synthetic_regression()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train)

    forced_split = {"feature": 0, "threshold": 0.5, "right": {"feature": 2, "threshold": 10.0}}

    tmp_split_file = tmp_path / "forced_split.json"
    with open(tmp_split_file, "w") as f:
        f.write(json.dumps(forced_split))

    params = {
        "objective": "regression",
        "feature_fraction": 0.6,
        "force_col_wise": True,
        "feature_fraction_seed": 1,
        "forcedsplits_filename": tmp_split_file,
    }

    gbm = lgb.train(params, lgb_train)
    ret = mean_absolute_error(y_test, gbm.predict(X_test))
    assert ret < 15.7

    tree_info = gbm.dump_model()["tree_info"]
    assert len(tree_info) > 1
    for tree in tree_info:
        tree_structure = tree["tree_structure"]
        assert tree_structure["split_feature"] == 0
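

# Sketch of a deeper forced-split specification, assuming the same JSON schema
# as ``forced_split`` above: each node takes "feature" and "threshold", and
# child constraints nest under "left" / "right". Illustrative only, not used
# by the tests.
_EXAMPLE_DEEPER_FORCED_SPLIT = {
    "feature": 0,
    "threshold": 0.5,
    "left": {"feature": 1, "threshold": -1.0},
    "right": {"feature": 2, "threshold": 10.0},
}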


def test_goss_boosting_and_strategy_equivalent():
    X, y = make_synthetic_regression(n_samples=10_000, n_features=10, n_informative=5, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
    base_params = {
        "metric": "l2",
        "verbose": -1,
        "bagging_seed": 0,
        "learning_rate": 0.05,
        "num_threads": 1,
        "force_row_wise": True,
        "gpu_use_dp": True,
    }
    params1 = {**base_params, "boosting": "goss"}
    evals_result1 = {}
    lgb.train(
        params1, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result1)]
    )
    params2 = {**base_params, "data_sample_strategy": "goss"}
    evals_result2 = {}
    lgb.train(
        params2, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result2)]
    )
    assert evals_result1["valid_0"]["l2"] == evals_result2["valid_0"]["l2"]


def test_sample_strategy_with_boosting():
    X, y = make_synthetic_regression(n_samples=10_000, n_features=10, n_informative=5, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

    base_params = {
        "metric": "l2",
        "verbose": -1,
        "num_threads": 1,
        "force_row_wise": True,
        "gpu_use_dp": True,
    }

    params1 = {**base_params, "boosting": "dart", "data_sample_strategy": "goss"}
    evals_result = {}
    gbm = lgb.train(
        params1, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    eval_res1 = evals_result["valid_0"]["l2"][-1]
    test_res1 = mean_squared_error(y_test, gbm.predict(X_test))
    assert test_res1 == pytest.approx(3149.393862, abs=1.0)
    assert eval_res1 == pytest.approx(test_res1)

    params2 = {**base_params, "boosting": "gbdt", "data_sample_strategy": "goss"}
    evals_result = {}
    gbm = lgb.train(
        params2, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    eval_res2 = evals_result["valid_0"]["l2"][-1]
    test_res2 = mean_squared_error(y_test, gbm.predict(X_test))
    assert test_res2 == pytest.approx(2547.715968, abs=1.0)
    assert eval_res2 == pytest.approx(test_res2)

    params3 = {**base_params, "boosting": "goss", "data_sample_strategy": "goss"}
    evals_result = {}
    gbm = lgb.train(
        params3, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    eval_res3 = evals_result["valid_0"]["l2"][-1]
    test_res3 = mean_squared_error(y_test, gbm.predict(X_test))
    assert test_res3 == pytest.approx(2547.715968, abs=1.0)
    assert eval_res3 == pytest.approx(test_res3)

    params4 = {**base_params, "boosting": "rf", "data_sample_strategy": "goss"}
    evals_result = {}
    gbm = lgb.train(
        params4, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    eval_res4 = evals_result["valid_0"]["l2"][-1]
    test_res4 = mean_squared_error(y_test, gbm.predict(X_test))
    assert test_res4 == pytest.approx(2095.538735, abs=1.0)
    assert eval_res4 == pytest.approx(test_res4)

    assert test_res1 != test_res2
    assert eval_res1 != eval_res2
    assert test_res2 == test_res3
    assert eval_res2 == eval_res3
    assert eval_res1 != eval_res4
    assert test_res1 != test_res4
    assert eval_res2 != eval_res4
    assert test_res2 != test_res4

    params5 = {
        **base_params,
        "boosting": "dart",
        "data_sample_strategy": "bagging",
        "bagging_freq": 1,
        "bagging_fraction": 0.5,
    }
    evals_result = {}
    gbm = lgb.train(
        params5, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    eval_res5 = evals_result["valid_0"]["l2"][-1]
    test_res5 = mean_squared_error(y_test, gbm.predict(X_test))
    assert test_res5 == pytest.approx(3134.866931, abs=1.0)
    assert eval_res5 == pytest.approx(test_res5)

    params6 = {
        **base_params,
        "boosting": "gbdt",
        "data_sample_strategy": "bagging",
        "bagging_freq": 1,
        "bagging_fraction": 0.5,
    }
    evals_result = {}
    gbm = lgb.train(
        params6, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    eval_res6 = evals_result["valid_0"]["l2"][-1]
    test_res6 = mean_squared_error(y_test, gbm.predict(X_test))
    assert test_res6 == pytest.approx(2539.792378, abs=1.0)
    assert eval_res6 == pytest.approx(test_res6)
    assert test_res5 != test_res6
    assert eval_res5 != eval_res6

    params7 = {
        **base_params,
        "boosting": "rf",
        "data_sample_strategy": "bagging",
        "bagging_freq": 1,
        "bagging_fraction": 0.5,
    }
    evals_result = {}
    gbm = lgb.train(
        params7, lgb_train, num_boost_round=10, valid_sets=lgb_eval, callbacks=[lgb.record_evaluation(evals_result)]
    )
    eval_res7 = evals_result["valid_0"]["l2"][-1]
    test_res7 = mean_squared_error(y_test, gbm.predict(X_test))
    assert test_res7 == pytest.approx(1518.704481, abs=1.0)
    assert eval_res7 == pytest.approx(test_res7)
    assert test_res5 != test_res7
    assert eval_res5 != eval_res7
    assert test_res6 != test_res7
    assert eval_res6 != eval_res7


def test_record_evaluation_with_train():
    X, y = make_synthetic_regression()
    ds = lgb.Dataset(X, y)
    eval_result = {}
    callbacks = [lgb.record_evaluation(eval_result)]
    params = {"objective": "l2", "num_leaves": 3}
    num_boost_round = 5
    bst = lgb.train(params, ds, num_boost_round=num_boost_round, valid_sets=[ds], callbacks=callbacks)
    assert list(eval_result.keys()) == ["training"]
    train_mses = []
    for i in range(num_boost_round):
        pred = bst.predict(X, num_iteration=i + 1)
        mse = mean_squared_error(y, pred)
        train_mses.append(mse)
    np.testing.assert_allclose(eval_result["training"]["l2"], train_mses)


@pytest.mark.parametrize("train_metric", [False, True])
def test_record_evaluation_with_cv(train_metric):
    X, y = make_synthetic_regression()
    ds = lgb.Dataset(X, y)
    eval_result = {}
    callbacks = [lgb.record_evaluation(eval_result)]
    metrics = ["l2", "rmse"]
    params = {"objective": "l2", "num_leaves": 3, "metric": metrics}
    cv_hist = lgb.cv(
        params, ds, num_boost_round=5, stratified=False, callbacks=callbacks, eval_train_metric=train_metric
    )
    expected_datasets = {"valid"}
    if train_metric:
        expected_datasets.add("train")
    assert set(eval_result.keys()) == expected_datasets
    for dataset in expected_datasets:
        for metric in metrics:
            for agg in ("mean", "stdv"):
                key = f"{dataset} {metric}-{agg}"
                np.testing.assert_allclose(cv_hist[key], eval_result[dataset][f"{metric}-{agg}"])


def test_pandas_with_numpy_regular_dtypes(rng_fixed_seed):
    pd = pytest.importorskip("pandas")
    uints = ["uint8", "uint16", "uint32", "uint64"]
    ints = ["int8", "int16", "int32", "int64"]
    bool_and_floats = ["bool", "float16", "float32", "float64"]

    n_samples = 100
    # data as float64
    df = pd.DataFrame(
        {
            "x1": rng_fixed_seed.integers(low=0, high=2, size=n_samples),
            "x2": rng_fixed_seed.integers(low=1, high=3, size=n_samples),
            "x3": 10 * rng_fixed_seed.integers(low=1, high=3, size=n_samples),
            "x4": 100 * rng_fixed_seed.integers(low=1, high=3, size=n_samples),
        }
    )
    df = df.astype(np.float64)
    y = df["x1"] * (df["x2"] + df["x3"] + df["x4"])
    ds = lgb.Dataset(df, y)
    params = {"objective": "l2", "num_leaves": 31, "min_child_samples": 1}
    bst = lgb.train(params, ds, num_boost_round=5)
    preds = bst.predict(df)

    # test all features were used
    assert bst.trees_to_dataframe()["split_feature"].nunique() == df.shape[1]
    # test the score is better than predicting the mean
    baseline = np.full_like(y, y.mean())
    assert mean_squared_error(y, preds) < mean_squared_error(y, baseline)

    # test all predictions are equal using different input dtypes
    for target_dtypes in [uints, ints, bool_and_floats]:
        df2 = df.astype({f"x{i}": dtype for i, dtype in enumerate(target_dtypes, start=1)})
        assert df2.dtypes.tolist() == target_dtypes
        ds2 = lgb.Dataset(df2, y)
        bst2 = lgb.train(params, ds2, num_boost_round=5)
        preds2 = bst2.predict(df2)
        np.testing.assert_allclose(preds, preds2)


def test_pandas_nullable_dtypes(rng_fixed_seed):
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame(
        {
            "x1": rng_fixed_seed.integers(low=1, high=3, size=100),
            "x2": np.linspace(-1, 1, 100),
            "x3": pd.arrays.SparseArray(rng_fixed_seed.integers(low=0, high=11, size=100)),
            "x4": rng_fixed_seed.uniform(size=(100,)) < 0.5,
        }
    )
    # introduce some missing values
    df.loc[1, "x1"] = np.nan
    df.loc[2, "x2"] = np.nan
    # in recent versions of pandas, dtype 'bool' cannot hold nan values, so cast x4 to float first
    df["x4"] = df["x4"].astype(np.float64)
    df.loc[3, "x4"] = np.nan
    y = df["x1"] * df["x2"] + df["x3"] * (1 + df["x4"])
    y = y.fillna(0)

    # train with regular dtypes
    params = {"objective": "l2", "num_leaves": 31, "min_child_samples": 1}
    ds = lgb.Dataset(df, y)
    bst = lgb.train(params, ds, num_boost_round=5)
    preds = bst.predict(df)

    # convert to nullable dtypes
    df2 = df.copy()
    df2["x1"] = df2["x1"].astype("Int32")
    df2["x2"] = df2["x2"].astype("Float64")
    df2["x4"] = df2["x4"].astype("boolean")

    # test training succeeds
    ds_nullable_dtypes = lgb.Dataset(df2, y)
    bst_nullable_dtypes = lgb.train(params, ds_nullable_dtypes, num_boost_round=5)
    preds_nullable_dtypes = bst_nullable_dtypes.predict(df2)

    trees_df = bst_nullable_dtypes.trees_to_dataframe()
    # test all features were used
    assert trees_df["split_feature"].nunique() == df.shape[1]
    # test the score is better than predicting the mean
    baseline = np.full_like(y, y.mean())
    assert mean_squared_error(y, preds) < mean_squared_error(y, baseline)

    # test equal predictions
    np.testing.assert_allclose(preds, preds_nullable_dtypes)


def test_boost_from_average_with_single_leaf_trees():
    # test data are taken from bug report
    # https://github.com/microsoft/LightGBM/issues/4708
    X = np.array(
        [
            [1021.0589, 1018.9578],
            [1023.85754, 1018.7854],
            [1024.5468, 1018.88513],
            [1019.02954, 1018.88513],
            [1016.79926, 1018.88513],
            [1007.6, 1018.88513],
        ],
        dtype=np.float32,
    )
    y = np.array([1023.8, 1024.6, 1024.4, 1023.8, 1022.0, 1014.4], dtype=np.float32)
    params = {
        "extra_trees": True,
        "min_data_in_bin": 1,
        "extra_seed": 7,
        "objective": "regression",
        "verbose": -1,
        "boost_from_average": True,
        "min_data_in_leaf": 1,
    }
    train_set = lgb.Dataset(X, y)
    model = lgb.train(params=params, train_set=train_set, num_boost_round=10)

    preds = model.predict(X)
    mean_preds = np.mean(preds)
    assert y.min() <= mean_preds <= y.max()


def test_cegb_split_buffer_clean(rng_fixed_seed):
    # modified from https://github.com/microsoft/LightGBM/issues/3679#issuecomment-938652811
    # and https://github.com/microsoft/LightGBM/pull/5087
    # test that the ``splits_per_leaf_`` of CEGB is cleaned before training a new tree
    # which is done in the fix #5164
    # without the fix:
    #    Check failed: (best_split_info.left_count) > (0)

    R, C = 1000, 100
    data = rng_fixed_seed.standard_normal(size=(R, C))
    for i in range(1, C):
        data[i] += data[0] * rng_fixed_seed.standard_normal()

    N = int(0.8 * len(data))
    train_data = data[:N]
    test_data = data[N:]
    train_y = np.sum(train_data, axis=1)
    test_y = np.sum(test_data, axis=1)

    train = lgb.Dataset(train_data, train_y, free_raw_data=True)

    params = {
        "boosting_type": "gbdt",
        "objective": "regression",
        "max_bin": 255,
        "num_leaves": 31,
        "seed": 0,
        "learning_rate": 0.1,
        "min_data_in_leaf": 0,
        "verbose": -1,
        "min_split_gain": 1000.0,
        "cegb_penalty_feature_coupled": 5 * np.arange(C),
        "cegb_penalty_split": 0.0002,
        "cegb_tradeoff": 10.0,
        "force_col_wise": True,
    }

    model = lgb.train(params, train, num_boost_round=10)
    predicts = model.predict(test_data)
    rmse = np.sqrt(mean_squared_error(test_y, predicts))
    assert rmse < 10.0


def test_verbosity_and_verbose(capsys):
    X, y = make_synthetic_regression()
    ds = lgb.Dataset(X, y)
    params = {
        "num_leaves": 3,
        "verbose": 1,
        "verbosity": 0,
    }
    lgb.train(params, ds, num_boost_round=1)
    expected_msg = "[LightGBM] [Warning] verbosity is set=0, verbose=1 will be ignored. Current value: verbosity=0"
    stdout = capsys.readouterr().out
    assert expected_msg in stdout


def test_verbosity_is_respected_when_using_custom_objective(capsys):
    X, y = make_synthetic_regression()
    ds = lgb.Dataset(X, y)
    params = {
        "objective": mse_obj,
        "nonsense": 123,
        "num_leaves": 3,
    }
    lgb.train({**params, "verbosity": -1}, ds, num_boost_round=1)
    assert_silent(capsys)
    lgb.train({**params, "verbosity": 0}, ds, num_boost_round=1)
    assert "[LightGBM] [Warning] Unknown parameter: nonsense" in capsys.readouterr().out


@pytest.mark.parametrize("verbosity_param", lgb.basic._ConfigAliases.get("verbosity"))
@pytest.mark.parametrize("verbosity", [-1, 0])
def test_verbosity_can_suppress_alias_warnings(capsys, verbosity_param, verbosity):
    X, y = make_synthetic_regression()
    ds = lgb.Dataset(X, y)
    params = {
        "num_leaves": 3,
        "subsample": 0.75,
        "bagging_fraction": 0.8,
        "force_col_wise": True,
        verbosity_param: verbosity,
    }
    lgb.train(params, ds, num_boost_round=1)
    expected_msg = (
        "[LightGBM] [Warning] bagging_fraction is set=0.8, subsample=0.75 will be ignored. "
        "Current value: bagging_fraction=0.8"
    )
    stdout = capsys.readouterr().out
    if verbosity >= 0:
        assert expected_msg in stdout
    else:
        assert re.search(r"\[LightGBM\]", stdout) is None


def test_cv_only_raises_num_rounds_warning_when_expected(capsys):
    X, y = make_synthetic_regression()
    ds = lgb.Dataset(X, y)
    base_params = {
        "num_leaves": 5,
        "objective": "regression",
        "verbosity": -1,
    }
    additional_kwargs = {"return_cvbooster": True, "stratified": False}

    # no warning: no aliases, all defaults
    cv_bst = lgb.cv({**base_params}, ds, **additional_kwargs)
    assert all(t == 100 for t in cv_bst["cvbooster"].num_trees())
    assert_silent(capsys)

    # no warning: no aliases, just num_boost_round
    cv_bst = lgb.cv({**base_params}, ds, num_boost_round=2, **additional_kwargs)
    assert all(t == 2 for t in cv_bst["cvbooster"].num_trees())
    assert_silent(capsys)

    # no warning: 1 alias + num_boost_round (both same value)
    cv_bst = lgb.cv({**base_params, "n_iter": 3}, ds, num_boost_round=3, **additional_kwargs)
    assert all(t == 3 for t in cv_bst["cvbooster"].num_trees())
    assert_silent(capsys)

    # no warning: 1 alias + num_boost_round (different values... value from params should win)
    cv_bst = lgb.cv({**base_params, "n_iter": 4}, ds, num_boost_round=3, **additional_kwargs)
    assert all(t == 4 for t in cv_bst["cvbooster"].num_trees())
    assert_silent(capsys)

    # no warning: 2 aliases (both same value)
    cv_bst = lgb.cv({**base_params, "n_iter": 3, "num_iterations": 3}, ds, **additional_kwargs)
    assert all(t == 3 for t in cv_bst["cvbooster"].num_trees())
    assert_silent(capsys)

    # no warning: 4 aliases (all same value)
    cv_bst = lgb.cv({**base_params, "n_iter": 3, "num_trees": 3, "nrounds": 3, "max_iter": 3}, ds, **additional_kwargs)
    assert all(t == 3 for t in cv_bst["cvbooster"].num_trees())
    assert_silent(capsys)

    # warning: 2 aliases (different values... "num_iterations" wins because it's the main param name)
    with pytest.warns(UserWarning, match="LightGBM will perform up to 5 boosting rounds"):
        cv_bst = lgb.cv({**base_params, "n_iter": 6, "num_iterations": 5}, ds, **additional_kwargs)
    assert all(t == 5 for t in cv_bst["cvbooster"].num_trees())
    # should not be any other logs (except the warning, intercepted by pytest)
    assert_silent(capsys)

    # warning: 2 aliases (different values... first one in the order from Config::parameter2aliases() wins)
    with pytest.warns(UserWarning, match="LightGBM will perform up to 4 boosting rounds"):
        cv_bst = lgb.cv({**base_params, "n_iter": 4, "max_iter": 5}, ds, **additional_kwargs)["cvbooster"]
    assert all(t == 4 for t in cv_bst.num_trees())
    # should not be any other logs (except the warning, intercepted by pytest)
    assert_silent(capsys)


def test_train_only_raises_num_rounds_warning_when_expected(capsys):
    X, y = make_synthetic_regression()
    ds = lgb.Dataset(X, y)
    base_params = {
        "num_leaves": 5,
        "objective": "regression",
        "verbosity": -1,
    }

    # no warning: no aliases, all defaults
    bst = lgb.train({**base_params}, ds)
    assert bst.num_trees() == 100
    assert_silent(capsys)

    # no warning: no aliases, just num_boost_round
    bst = lgb.train({**base_params}, ds, num_boost_round=2)
    assert bst.num_trees() == 2
    assert_silent(capsys)

    # no warning: 1 alias + num_boost_round (both same value)
    bst = lgb.train({**base_params, "n_iter": 3}, ds, num_boost_round=3)
    assert bst.num_trees() == 3
    assert_silent(capsys)

    # no warning: 1 alias + num_boost_round (different values... value from params should win)
    bst = lgb.train({**base_params, "n_iter": 4}, ds, num_boost_round=3)
    assert bst.num_trees() == 4
    assert_silent(capsys)

    # no warning: 2 aliases (both same value)
    bst = lgb.train({**base_params, "n_iter": 3, "num_iterations": 3}, ds)
    assert bst.num_trees() == 3
    assert_silent(capsys)

    # no warning: 4 aliases (all same value)
    bst = lgb.train({**base_params, "n_iter": 3, "num_trees": 3, "nrounds": 3, "max_iter": 3}, ds)
    assert bst.num_trees() == 3
    assert_silent(capsys)

    # warning: 2 aliases (different values... "num_iterations" wins because it's the main param name)
    with pytest.warns(UserWarning, match="LightGBM will perform up to 5 boosting rounds"):
        bst = lgb.train({**base_params, "n_iter": 6, "num_iterations": 5}, ds)
    assert bst.num_trees() == 5
    # should not be any other logs (except the warning, intercepted by pytest)
    assert_silent(capsys)

    # warning: 2 aliases (different values... first one in the order from Config::parameter2aliases() wins)
    with pytest.warns(UserWarning, match="LightGBM will perform up to 4 boosting rounds"):
        bst = lgb.train({**base_params, "n_iter": 4, "max_iter": 5}, ds)
    assert bst.num_trees() == 4
    # should not be any other logs (except the warning, intercepted by pytest)
    assert_silent(capsys)


@pytest.mark.skipif(not PANDAS_INSTALLED, reason="pandas is not installed")
def test_validate_features():
    X, y = make_synthetic_regression()
    features = ["x1", "x2", "x3", "x4"]
    df = pd_DataFrame(X, columns=features)
    ds = lgb.Dataset(df, y)
    bst = lgb.train({"num_leaves": 15, "verbose": -1}, ds, num_boost_round=10)
    assert bst.feature_name() == features

    # try to predict with a different feature
    df2 = df.rename(columns={"x3": "z"})
    with pytest.raises(lgb.basic.LightGBMError, match="Expected 'x3' at position 2 but found 'z'"):
        bst.predict(df2, validate_features=True)

    # check that disabling the check doesn't raise the error
    bst.predict(df2, validate_features=False)

    # try to refit with a different feature
    with pytest.raises(lgb.basic.LightGBMError, match="Expected 'x3' at position 2 but found 'z'"):
        bst.refit(df2, y, validate_features=True)

    # check that disabling the check doesn't raise the error
    bst.refit(df2, y, validate_features=False)


def test_train_and_cv_raise_informative_error_for_train_set_of_wrong_type():
    with pytest.raises(TypeError, match=r"train\(\) only accepts Dataset object, train_set has type 'list'\."):
        lgb.train({}, train_set=[])
    with pytest.raises(TypeError, match=r"cv\(\) only accepts Dataset object, train_set has type 'list'\."):
        lgb.cv({}, train_set=[])


@pytest.mark.parametrize("num_boost_round", [-7, -1, 0])
def test_train_and_cv_raise_informative_error_for_impossible_num_boost_round(num_boost_round):
    X, y = make_synthetic_regression(n_samples=100)
    error_msg = rf"Number of boosting rounds must be greater than 0\. Got {num_boost_round}\."
    with pytest.raises(ValueError, match=error_msg):
        lgb.train({}, train_set=lgb.Dataset(X, y), num_boost_round=num_boost_round)
    with pytest.raises(ValueError, match=error_msg):
        lgb.cv({}, train_set=lgb.Dataset(X, y), num_boost_round=num_boost_round)


def test_train_raises_informative_error_if_any_valid_sets_are_not_dataset_objects():
    X, y = make_synthetic_regression(n_samples=100)
    X_valid = X * 2.0
    with pytest.raises(
        TypeError, match=r"Every item in valid_sets must be a Dataset object\. Item 1 has type 'tuple'\."
    ):
        lgb.train(
            params={},
            train_set=lgb.Dataset(X, y),
            valid_sets=[lgb.Dataset(X_valid, y), ([1.0], [2.0]), [5.6, 5.7, 5.8]],
        )


def test_train_raises_informative_error_for_params_of_wrong_type():
    X, y = make_synthetic_regression()
    params = {"num_leaves": "too-many"}
    dtrain = lgb.Dataset(X, label=y)
    with pytest.raises(lgb.basic.LightGBMError, match='Parameter num_leaves should be of type int, got "too-many"'):
        lgb.train(params, dtrain)


def test_quantized_training():
    X, y = make_synthetic_regression()
    ds = lgb.Dataset(X, label=y)
    bst_params = {"num_leaves": 15, "verbose": -1, "seed": 0}
    bst = lgb.train(bst_params, ds, num_boost_round=10)
    rmse = np.sqrt(np.mean((bst.predict(X) - y) ** 2))
    bst_params.update(
        {
            "use_quantized_grad": True,
            "num_grad_quant_bins": 30,
            "quant_train_renew_leaf": True,
        }
    )
    quant_bst = lgb.train(bst_params, ds, num_boost_round=10)
    quant_rmse = np.sqrt(np.mean((quant_bst.predict(X) - y) ** 2))
    assert quant_rmse < rmse + 6.0


def test_bagging_by_query_in_lambdarank():
    rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank"
    X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train"))
    q_train = np.loadtxt(str(rank_example_dir / "rank.train.query"))
    X_test, y_test = load_svmlight_file(str(rank_example_dir / "rank.test"))
    q_test = np.loadtxt(str(rank_example_dir / "rank.test.query"))
    params = {"objective": "lambdarank", "verbose": -1, "metric": "ndcg", "ndcg_eval_at": [5]}
    lgb_train = lgb.Dataset(X_train, y_train, group=q_train, params=params)
    lgb_test = lgb.Dataset(X_test, y_test, group=q_test, params=params)
    gbm = lgb.train(params, lgb_train, num_boost_round=50, valid_sets=[lgb_test])
    ndcg_score = gbm.best_score["valid_0"]["ndcg@5"]

    params.update({"bagging_by_query": True, "bagging_fraction": 0.1, "bagging_freq": 1})
    gbm_bagging_by_query = lgb.train(params, lgb_train, num_boost_round=50, valid_sets=[lgb_test])
    ndcg_score_bagging_by_query = gbm_bagging_by_query.best_score["valid_0"]["ndcg@5"]

    params.update({"bagging_by_query": False, "bagging_fraction": 0.1, "bagging_freq": 1})
    gbm_no_bagging_by_query = lgb.train(params, lgb_train, num_boost_round=50, valid_sets=[lgb_test])
    ndcg_score_no_bagging_by_query = gbm_no_bagging_by_query.best_score["valid_0"]["ndcg@5"]
    assert ndcg_score_bagging_by_query >= ndcg_score - 0.1
    assert ndcg_score_no_bagging_by_query >= ndcg_score - 0.1


def test_equal_predict_from_row_major_and_col_major_data():
    X_row, y = make_synthetic_regression()
    assert X_row.flags["C_CONTIGUOUS"]
    assert not X_row.flags["F_CONTIGUOUS"]
    ds = lgb.Dataset(X_row, y)
    params = {"num_leaves": 8, "verbose": -1}
    bst = lgb.train(params, ds, num_boost_round=5)
    preds_row = bst.predict(X_row)

    X_col = np.asfortranarray(X_row)
    assert X_col.flags["F_CONTIGUOUS"]
    assert not X_col.flags["C_CONTIGUOUS"]
    preds_col = bst.predict(X_col)

    np.testing.assert_allclose(preds_row, preds_col)
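

# Sketch of the invariant exercised above: np.asfortranarray changes only the
# memory layout (row-major C order to column-major Fortran order), not the
# values, so a booster must score both layouts identically.
# ``_same_values_different_layout`` is an illustrative helper.
def _same_values_different_layout(arr):
    return np.array_equal(arr, np.asfortranarray(arr))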