# coding: utf-8
"""Tests for lightgbm.dask module"""

import inspect
import random
import socket
from itertools import groupby
from os import getenv
from platform import machine
from sys import platform
from urllib.parse import urlparse

import pytest
from sklearn.metrics import accuracy_score, r2_score

import lightgbm as lgb

from .utils import sklearn_multiclass_custom_objective

if not platform.startswith("linux"):
    pytest.skip("lightgbm.dask is currently supported in Linux environments", allow_module_level=True)
if machine() != "x86_64":
    pytest.skip("lightgbm.dask tests are currently skipped on some architectures like arm64", allow_module_level=True)
if not lgb.compat.DASK_INSTALLED:
    pytest.skip("Dask is not installed", allow_module_level=True)

import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import sklearn.utils.estimator_checks as sklearn_checks
from dask.array.utils import assert_eq
from dask.distributed import Client, LocalCluster, default_client, wait
from scipy.sparse import csc_matrix, csr_matrix
from scipy.stats import spearmanr
from sklearn.datasets import make_blobs, make_regression

from .utils import make_ranking, pickle_obj, unpickle_obj

tasks = ["binary-classification", "multiclass-classification", "regression", "ranking"]
distributed_training_algorithms = ["data", "voting"]
data_output = ["array", "scipy_csr_matrix", "dataframe", "dataframe-with-categorical"]
boosting_types = ["gbdt", "dart", "goss", "rf"]
group_sizes = [5, 5, 5, 10, 10, 10, 20, 20, 20, 50, 50]
task_to_dask_factory = {
    "regression": lgb.DaskLGBMRegressor,
    "binary-classification": lgb.DaskLGBMClassifier,
    "multiclass-classification": lgb.DaskLGBMClassifier,
    "ranking": lgb.DaskLGBMRanker,
}
task_to_local_factory = {
    "regression": lgb.LGBMRegressor,
    "binary-classification": lgb.LGBMClassifier,
    "multiclass-classification": lgb.LGBMClassifier,
    "ranking": lgb.LGBMRanker,
}

pytestmark = [
    pytest.mark.skipif(getenv("TASK", "") == "mpi", reason="Fails to run with MPI interface"),
    pytest.mark.skipif(getenv("TASK", "") == "gpu", reason="Fails to run with GPU interface"),
    pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Fails to run with CUDA interface"),
]


@pytest.fixture(scope="module")
def cluster():
    dask_cluster = LocalCluster(n_workers=2, threads_per_worker=2, dashboard_address=None)
    yield dask_cluster
    dask_cluster.close()


@pytest.fixture(scope="module")
def cluster2():
    dask_cluster = LocalCluster(n_workers=2, threads_per_worker=2, dashboard_address=None)
    yield dask_cluster
    dask_cluster.close()


@pytest.fixture(scope="module")
def cluster_three_workers():
    dask_cluster = LocalCluster(n_workers=3, threads_per_worker=1, dashboard_address=None)
    yield dask_cluster
    dask_cluster.close()


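# simple counter stored as a function attribute: each test that requests this
# fixture gets a fresh port, so tests in this module never reuse a listen port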
@pytest.fixture()
def listen_port():
    listen_port.port += 10
    return listen_port.port


listen_port.port = 13000


def _get_workers_hostname(cluster: LocalCluster) -> str:
    one_worker_address = next(iter(cluster.scheduler_info["workers"]))
    return urlparse(one_worker_address).hostname


def _create_ranking_data(n_samples=100, output="array", chunk_size=50, **kwargs):
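    # builds a learning-to-rank dataset and its dask equivalents; group sizes are
    # returned run-length encoded (g_rle), the format LGBMRanker expects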
    X, y, g = make_ranking(n_samples=n_samples, random_state=42, **kwargs)
    rnd = np.random.RandomState(42)
    w = rnd.rand(X.shape[0]) * 0.01
    g_rle = np.array([len(list(grp)) for _, grp in groupby(g)])

    if output.startswith("dataframe"):
        # add target, weight, and group to DataFrame so that partitions abide by group boundaries.
        X_df = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(X.shape[1])])
        if output == "dataframe-with-categorical":
            for i in range(5):
                col_name = f"cat_col{i}"
                cat_values = rnd.choice(["a", "b"], X.shape[0])
                cat_series = pd.Series(cat_values, dtype="category")
                X_df[col_name] = cat_series
        X = X_df.copy()
        X_df = X_df.assign(y=y, g=g, w=w)

        # set_index ensures partitions are based on group id.
        # See https://stackoverflow.com/questions/49532824/dask-dataframe-split-partitions-based-on-a-column-or-function.
        X_df.set_index("g", inplace=True)
        dX = dd.from_pandas(X_df, chunksize=chunk_size)

        # separate target, weight from features.
        dy = dX["y"]
        dw = dX["w"]
        dX = dX.drop(columns=["y", "w"])
        dg = dX.index.to_series()

        # encode group identifiers into run-length encoding, the format LGBMRanker expects,
        # so that within each partition, sum(g) = n_samples.
        dg = dg.map_partitions(lambda p: p.groupby("g", sort=False).apply(lambda z: z.shape[0]))
    elif output == "array":
        # ranking arrays: one chunk per group. Each chunk must include all columns.
        p = X.shape[1]
        dX, dy, dw, dg = [], [], [], []
        for g_idx, rhs in enumerate(np.cumsum(g_rle)):
            lhs = rhs - g_rle[g_idx]
            dX.append(da.from_array(X[lhs:rhs, :], chunks=(rhs - lhs, p)))
            dy.append(da.from_array(y[lhs:rhs]))
            dw.append(da.from_array(w[lhs:rhs]))
            dg.append(da.from_array(np.array([g_rle[g_idx]])))

        dX = da.concatenate(dX, axis=0)
        dy = da.concatenate(dy, axis=0)
        dw = da.concatenate(dw, axis=0)
        dg = da.concatenate(dg, axis=0)
    else:
        raise ValueError("Ranking data creation only supported for Dask arrays and dataframes")

    return X, y, w, g_rle, dX, dy, dw, dg


def _create_data(objective, n_samples=1_000, output="array", chunk_size=500, **kwargs):
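    # returns (X, y, weights, groups, dX, dy, dw, dg): local data plus its dask
    # counterparts; the group entries are only populated for the ranking objective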
    if objective.endswith("classification"):
        if objective == "binary-classification":
            centers = [[-4, -4], [4, 4]]
        elif objective == "multiclass-classification":
            centers = [[-4, -4], [4, 4], [-4, 4]]
        else:
            raise ValueError(f"Unknown classification task '{objective}'")
        X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=42)
    elif objective == "regression":
        X, y = make_regression(n_samples=n_samples, n_features=4, n_informative=2, random_state=42)
    elif objective == "ranking":
        return _create_ranking_data(n_samples=n_samples, output=output, chunk_size=chunk_size, **kwargs)
    else:
        raise ValueError(f"Unknown objective '{objective}'")
    rnd = np.random.RandomState(42)
    weights = rnd.random(X.shape[0]) * 0.01

    if output == "array":
        dX = da.from_array(X, (chunk_size, X.shape[1]))
        dy = da.from_array(y, chunk_size)
        dw = da.from_array(weights, chunk_size)
    elif output.startswith("dataframe"):
        X_df = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(X.shape[1])])
        if output == "dataframe-with-categorical":
            num_cat_cols = 2
            for i in range(num_cat_cols):
                col_name = f"cat_col{i}"
                cat_values = rnd.choice(["a", "b"], X.shape[0])
                cat_series = pd.Series(cat_values, dtype="category")
                X_df[col_name] = cat_series
                X = np.hstack((X, cat_series.cat.codes.values.reshape(-1, 1)))

            # make one categorical feature relevant to the target
            cat_col_is_a = X_df["cat_col0"] == "a"
            if objective == "regression":
                y = np.where(cat_col_is_a, y, 2 * y)
            elif objective == "binary-classification":
                y = np.where(cat_col_is_a, y, 1 - y)
            elif objective == "multiclass-classification":
                n_classes = 3
                y = np.where(cat_col_is_a, y, (1 + y) % n_classes)
        y_df = pd.Series(y, name="target")
        dX = dd.from_pandas(X_df, chunksize=chunk_size)
        dy = dd.from_pandas(y_df, chunksize=chunk_size)
        dw = dd.from_array(weights, chunksize=chunk_size)
    elif output == "scipy_csr_matrix":
        dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(csr_matrix)
        dy = da.from_array(y, chunks=chunk_size)
        dw = da.from_array(weights, chunk_size)
        X = csr_matrix(X)
    elif output == "scipy_csc_matrix":
        dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(csc_matrix)
        dy = da.from_array(y, chunks=chunk_size)
        dw = da.from_array(weights, chunk_size)
        X = csc_matrix(X)
    else:
        raise ValueError(f"Unknown output type '{output}'")

    return X, y, weights, None, dX, dy, dw, None


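# a simple R^2 computed after materializing the dask collections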
def _r2_score(dy_true, dy_pred):
    y_true = dy_true.compute()
    y_pred = dy_pred.compute()
    numerator = ((y_true - y_pred) ** 2).sum(axis=0)
    denominator = ((y_true - y_true.mean(axis=0)) ** 2).sum(axis=0)
    return 1 - numerator / denominator


def _accuracy_score(dy_true, dy_pred):
    y_true = dy_true.compute()
    y_pred = dy_pred.compute()
    return (y_true == y_pred).mean()


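# custom eval metrics follow LightGBM's (eval_name, eval_result, is_higher_better)
# contract; returning a constant makes it easy to assert the metric was evaluated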
def _constant_metric(y_true, y_pred):
    metric_name = "constant_metric"
    value = 0.708
    is_higher_better = False
    return metric_name, value, is_higher_better


def _objective_least_squares(y_true, y_pred):
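    # for L(y, p) = (p - y)^2 / 2 the gradient is p - y and the hessian is 1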
    grad = y_pred - y_true
    hess = np.ones(len(y_true))
    return grad, hess


def _objective_logistic_regression(y_true, y_pred):
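    # y_pred arrives as the raw score s; with p = sigmoid(s), the logistic loss has
    # gradient p - y and hessian p * (1 - p)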
    y_pred = 1.0 / (1.0 + np.exp(-y_pred))
    grad = y_pred - y_true
    hess = y_pred * (1.0 - y_pred)
    return grad, hess


@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification"])
@pytest.mark.parametrize("boosting_type", boosting_types)
@pytest.mark.parametrize("tree_learner", distributed_training_algorithms)
def test_classifier(output, task, boosting_type, tree_learner, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective=task, output=output)

        params = {"boosting_type": boosting_type, "tree_learner": tree_learner, "n_estimators": 50, "num_leaves": 31}
        if boosting_type == "rf":
            params.update(
                {
                    "bagging_freq": 1,
                    "bagging_fraction": 0.9,
                }
            )
        elif boosting_type == "goss":
            params["top_rate"] = 0.5

        dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, **params)
        dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw)
        p1 = dask_classifier.predict(dX)
        p1_raw = dask_classifier.predict(dX, raw_score=True).compute()
        p1_first_iter_raw = dask_classifier.predict(dX, start_iteration=0, num_iteration=1, raw_score=True).compute()
        p1_early_stop_raw = dask_classifier.predict(
            dX, pred_early_stop=True, pred_early_stop_margin=1.0, pred_early_stop_freq=2, raw_score=True
        ).compute()
        p1_proba = dask_classifier.predict_proba(dX).compute()
        p1_pred_leaf = dask_classifier.predict(dX, pred_leaf=True)
        p1_local = dask_classifier.to_local().predict(X)
        s1 = _accuracy_score(dy, p1)
        p1 = p1.compute()

        local_classifier = lgb.LGBMClassifier(**params)
        local_classifier.fit(X, y, sample_weight=w)
        p2 = local_classifier.predict(X)
        p2_proba = local_classifier.predict_proba(X)
        s2 = local_classifier.score(X, y)

        if boosting_type == "rf":
            # https://github.com/microsoft/LightGBM/issues/4118
            assert_eq(s1, s2, atol=0.01)
            assert_eq(p1_proba, p2_proba, atol=0.8)
        else:
            assert_eq(s1, s2)
            assert_eq(p1, p2)
            assert_eq(p1, y)
            assert_eq(p2, y)
            assert_eq(p1_proba, p2_proba, atol=0.03)
            assert_eq(p1_local, p2)
            assert_eq(p1_local, y)

        # extra predict() parameters should be passed through correctly
        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_first_iter_raw)

        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_early_stop_raw)

        # pred_leaf values should have the right shape
        # and values that look like valid tree nodes
        pred_leaf_vals = p1_pred_leaf.compute()
        assert pred_leaf_vals.shape == (X.shape[0], dask_classifier.booster_.num_trees())
        assert np.max(pred_leaf_vals) <= params["num_leaves"]
        assert np.min(pred_leaf_vals) >= 0
        assert len(np.unique(pred_leaf_vals)) <= params["num_leaves"]

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_classifier.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", data_output + ["scipy_csc_matrix"])
@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification"])
def test_classifier_pred_contrib(output, task, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective=task, output=output)

        params = {"n_estimators": 10, "num_leaves": 10}

        dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, tree_learner="data", **params)
        dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw)
        preds_with_contrib = dask_classifier.predict(dX, pred_contrib=True)

        local_classifier = lgb.LGBMClassifier(**params)
        local_classifier.fit(X, y, sample_weight=w)
        local_preds_with_contrib = local_classifier.predict(X, pred_contrib=True)

        # shape depends on whether it is binary or multiclass classification
        num_features = dask_classifier.n_features_
        num_classes = dask_classifier.n_classes_
        if num_classes == 2:
            expected_num_cols = num_features + 1
        else:
            expected_num_cols = (num_features + 1) * num_classes

        # in the special case of multi-class classification using scipy sparse matrices,
        # the output of `.predict(..., pred_contrib=True)` is a list of sparse matrices (one per class)
        #
        # since that case is so different than all other cases, check the relevant things here
        # and then return early
        if output.startswith("scipy") and task == "multiclass-classification":
            if output == "scipy_csr_matrix":
                expected_type = csr_matrix
            elif output == "scipy_csc_matrix":
                expected_type = csc_matrix
            else:
                raise ValueError(f"Unrecognized output type: {output}")
            assert isinstance(preds_with_contrib, list)
            assert all(isinstance(arr, da.Array) for arr in preds_with_contrib)
            assert all(isinstance(arr._meta, expected_type) for arr in preds_with_contrib)
            assert len(preds_with_contrib) == num_classes
            assert len(preds_with_contrib) == len(local_preds_with_contrib)
            for i in range(num_classes):
                computed_preds = preds_with_contrib[i].compute()
                assert isinstance(computed_preds, expected_type)
                assert computed_preds.shape[1] == num_classes
                assert computed_preds.shape == local_preds_with_contrib[i].shape
                assert len(np.unique(computed_preds[:, -1])) == 1
                # raw scores will probably be different, but at least check that all predicted classes are the same
                pred_classes = np.argmax(computed_preds.toarray(), axis=1)
                local_pred_classes = np.argmax(local_preds_with_contrib[i].toarray(), axis=1)
                np.testing.assert_array_equal(pred_classes, local_pred_classes)
            return

        preds_with_contrib = preds_with_contrib.compute()
        if output.startswith("scipy"):
            preds_with_contrib = preds_with_contrib.toarray()

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_classifier.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="

        # * shape depends on whether it is binary or multiclass classification
        # * matrix for binary classification is of the form [feature_contrib, base_value],
        #   for multi-class it's [feat_contrib_class1, base_value_class1, feat_contrib_class2, base_value_class2, etc.]
        # * contrib outputs for distributed training are different than from local training, so we can just test
        #   that the output has the right shape and base values are in the right position
        assert preds_with_contrib.shape[1] == expected_num_cols
        assert preds_with_contrib.shape == local_preds_with_contrib.shape

        if num_classes == 2:
            assert len(np.unique(preds_with_contrib[:, num_features])) == 1
        else:
            for i in range(num_classes):
                base_value_col = num_features * (i + 1) + i
                assert len(np.unique(preds_with_contrib[:, base_value_col])) == 1


@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification"])
def test_classifier_custom_objective(output, task, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(
            objective=task,
            output=output,
        )

        params = {
            "n_estimators": 50,
            "num_leaves": 31,
            "verbose": -1,
            "seed": 708,
            "deterministic": True,
            "force_col_wise": True,
        }

        if task == "binary-classification":
            params.update(
                {
                    "objective": _objective_logistic_regression,
                }
            )
        elif task == "multiclass-classification":
            params.update({"objective": sklearn_multiclass_custom_objective, "num_classes": 3})

        dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, tree_learner="data", **params)
        dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw)
        dask_classifier_local = dask_classifier.to_local()
        p1_raw = dask_classifier.predict(dX, raw_score=True).compute()
        p1_raw_local = dask_classifier_local.predict(X, raw_score=True)

        local_classifier = lgb.LGBMClassifier(**params)
        local_classifier.fit(X, y, sample_weight=w)
        p2_raw = local_classifier.predict(X, raw_score=True)

        # with a custom objective, prediction result is a raw score instead of predicted class
        if task == "binary-classification":
            p1_proba = 1.0 / (1.0 + np.exp(-p1_raw))
            p1_class = (p1_proba > 0.5).astype(np.int64)
            p1_proba_local = 1.0 / (1.0 + np.exp(-p1_raw_local))
            p1_class_local = (p1_proba_local > 0.5).astype(np.int64)
            p2_proba = 1.0 / (1.0 + np.exp(-p2_raw))
            p2_class = (p2_proba > 0.5).astype(np.int64)
        elif task == "multiclass-classification":
            p1_proba = np.exp(p1_raw) / np.sum(np.exp(p1_raw), axis=1).reshape(-1, 1)
            p1_class = p1_proba.argmax(axis=1)
            p1_proba_local = np.exp(p1_raw_local) / np.sum(np.exp(p1_raw_local), axis=1).reshape(-1, 1)
            p1_class_local = p1_proba_local.argmax(axis=1)
            p2_proba = np.exp(p2_raw) / np.sum(np.exp(p2_raw), axis=1).reshape(-1, 1)
            p2_class = p2_proba.argmax(axis=1)

        # function should have been preserved
        assert callable(dask_classifier.objective_)
        assert callable(dask_classifier_local.objective_)

        # should correctly classify every sample
        assert_eq(p1_class, y)
        assert_eq(p1_class_local, y)
        assert_eq(p2_class, y)

        # probability estimates should be similar
        assert_eq(p1_proba, p2_proba, atol=0.03)
        assert_eq(p1_proba, p1_proba_local)


def test_machines_to_worker_map_unparseable_host_names():
    workers = {"0.0.0.1:80": {}, "0.0.0.2:80": {}}
    machines = "0.0.0.1:80,0.0.0.2:80"
    with pytest.raises(ValueError, match="Could not parse host name from worker address '0.0.0.1:80'"):
        lgb.dask._machines_to_worker_map(machines=machines, worker_addresses=workers.keys())


def test_training_does_not_fail_on_port_conflicts(cluster):
    with Client(cluster) as client:
        _, _, _, _, dX, dy, dw, _ = _create_data("binary-classification", output="array")
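
        # occupy LightGBM's default port (local_listen_port=12400) up-front, so that
        # training is forced to find and use other free ports on the workers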
        lightgbm_default_port = 12400
        workers_hostname = _get_workers_hostname(cluster)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind((workers_hostname, lightgbm_default_port))
            dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, n_estimators=5, num_leaves=5)
            for _ in range(5):
                dask_classifier.fit(
                    X=dX,
                    y=dy,
                    sample_weight=dw,
                )
                assert dask_classifier.booster_


@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("boosting_type", boosting_types)
@pytest.mark.parametrize("tree_learner", distributed_training_algorithms)
def test_regressor(output, boosting_type, tree_learner, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output)

        params = {
            "boosting_type": boosting_type,
            "random_state": 42,
            "num_leaves": 31,
            "n_estimators": 20,
        }
        if boosting_type == "rf":
            params.update(
                {
                    "bagging_freq": 1,
                    "bagging_fraction": 0.9,
                }
            )

        dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, tree=tree_learner, **params)
        dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
        p1 = dask_regressor.predict(dX)
        p1_pred_leaf = dask_regressor.predict(dX, pred_leaf=True)

        s1 = _r2_score(dy, p1)
        p1 = p1.compute()
        p1_raw = dask_regressor.predict(dX, raw_score=True).compute()
        p1_first_iter_raw = dask_regressor.predict(dX, start_iteration=0, num_iteration=1, raw_score=True).compute()
        p1_local = dask_regressor.to_local().predict(X)
        s1_local = dask_regressor.to_local().score(X, y)

        local_regressor = lgb.LGBMRegressor(**params)
        local_regressor.fit(X, y, sample_weight=w)
        s2 = local_regressor.score(X, y)
        p2 = local_regressor.predict(X)

        # Scores should be the same
        assert_eq(s1, s2, atol=0.01)
        assert_eq(s1, s1_local)

        # Predictions should be roughly the same.
        assert_eq(p1, p1_local)

        # pred_leaf values should have the right shape
        # and values that look like valid tree nodes
        pred_leaf_vals = p1_pred_leaf.compute()
        assert pred_leaf_vals.shape == (X.shape[0], dask_regressor.booster_.num_trees())
        assert np.max(pred_leaf_vals) <= params["num_leaves"]
        assert np.min(pred_leaf_vals) >= 0
        assert len(np.unique(pred_leaf_vals)) <= params["num_leaves"]

        assert_eq(p1, y, rtol=0.5, atol=50.0)
        assert_eq(p2, y, rtol=0.5, atol=50.0)

        # extra predict() parameters should be passed through correctly
        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_first_iter_raw)

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_regressor.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", data_output)
def test_regressor_pred_contrib(output, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output)

        params = {"n_estimators": 10, "num_leaves": 10}

        dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, tree_learner="data", **params)
        dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
        preds_with_contrib = dask_regressor.predict(dX, pred_contrib=True).compute()

        local_regressor = lgb.LGBMRegressor(**params)
        local_regressor.fit(X, y, sample_weight=w)
        local_preds_with_contrib = local_regressor.predict(X, pred_contrib=True)

        if output == "scipy_csr_matrix":
            preds_with_contrib = preds_with_contrib.toarray()

        # contrib outputs for distributed training are different than from local training, so we can just test
        # that the output has the right shape and base values are in the right position
        num_features = dX.shape[1]
        assert preds_with_contrib.shape[1] == num_features + 1
        assert preds_with_contrib.shape == local_preds_with_contrib.shape

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_regressor.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("alpha", [0.1, 0.5, 0.9])
def test_regressor_quantile(output, alpha, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output)

        params = {"objective": "quantile", "alpha": alpha, "random_state": 42, "n_estimators": 10, "num_leaves": 10}

        dask_regressor = lgb.DaskLGBMRegressor(client=client, tree_learner_type="data_parallel", **params)
        dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
        p1 = dask_regressor.predict(dX).compute()
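        # with the quantile objective, roughly an `alpha` fraction of targets
        # should fall below the predictions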
        q1 = np.count_nonzero(y < p1) / y.shape[0]

        local_regressor = lgb.LGBMRegressor(**params)
        local_regressor.fit(X, y, sample_weight=w)
        p2 = local_regressor.predict(X)
        q2 = np.count_nonzero(y < p2) / y.shape[0]

        # Quantiles should be right
        np.testing.assert_allclose(q1, alpha, atol=0.2)
        np.testing.assert_allclose(q2, alpha, atol=0.2)

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_regressor.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", data_output)
def test_regressor_custom_objective(output, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output)

        params = {"n_estimators": 10, "num_leaves": 10, "objective": _objective_least_squares}

        dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, tree_learner="data", **params)
        dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
        dask_regressor_local = dask_regressor.to_local()
        p1 = dask_regressor.predict(dX)
        p1_local = dask_regressor_local.predict(X)
        s1_local = dask_regressor_local.score(X, y)
        s1 = _r2_score(dy, p1)
        p1 = p1.compute()

        local_regressor = lgb.LGBMRegressor(**params)
        local_regressor.fit(X, y, sample_weight=w)
        p2 = local_regressor.predict(X)
        s2 = local_regressor.score(X, y)

        # function should have been preserved
        assert callable(dask_regressor.objective_)
        assert callable(dask_regressor_local.objective_)

        # Scores should be the same
        assert_eq(s1, s2, atol=0.01)
        assert_eq(s1, s1_local)

        # local and Dask predictions should be the same
        assert_eq(p1, p1_local)

        # predictions should be better than random
        assert_precision = {"rtol": 0.5, "atol": 50.0}
        assert_eq(p1, y, **assert_precision)
        assert_eq(p2, y, **assert_precision)


@pytest.mark.parametrize("output", ["array", "dataframe", "dataframe-with-categorical"])
@pytest.mark.parametrize("group", [None, group_sizes])
@pytest.mark.parametrize("boosting_type", boosting_types)
@pytest.mark.parametrize("tree_learner", distributed_training_algorithms)
def test_ranker(output, group, boosting_type, tree_learner, cluster):
    with Client(cluster) as client:
        if output == "dataframe-with-categorical":
            X, y, w, g, dX, dy, dw, dg = _create_data(
                objective="ranking", output=output, group=group, n_features=1, n_informative=1
            )
        else:
            X, y, w, g, dX, dy, dw, dg = _create_data(objective="ranking", output=output, group=group)

        # rebalance small dask.Array dataset for better performance.
        if output == "array":
            dX = dX.persist()
            dy = dy.persist()
            dw = dw.persist()
            dg = dg.persist()
            _ = wait([dX, dy, dw, dg])
            client.rebalance()

        # use many trees + leaves to overfit, helping ensure that the Dask data-parallel strategy
        # matches the serial learner. See https://github.com/microsoft/LightGBM/issues/3292#issuecomment-671288210.
        params = {
            "boosting_type": boosting_type,
            "random_state": 42,
            "n_estimators": 50,
            "num_leaves": 20,
            "min_child_samples": 1,
        }
        if boosting_type == "rf":
            params.update(
                {
                    "bagging_freq": 1,
                    "bagging_fraction": 0.9,
                }
            )

        dask_ranker = lgb.DaskLGBMRanker(client=client, time_out=5, tree_learner_type=tree_learner, **params)
        dask_ranker = dask_ranker.fit(dX, dy, sample_weight=dw, group=dg)
        rnkvec_dask = dask_ranker.predict(dX)
        rnkvec_dask = rnkvec_dask.compute()
        p1_pred_leaf = dask_ranker.predict(dX, pred_leaf=True)
        p1_raw = dask_ranker.predict(dX, raw_score=True).compute()
        p1_first_iter_raw = dask_ranker.predict(dX, start_iteration=0, num_iteration=1, raw_score=True).compute()
        p1_early_stop_raw = dask_ranker.predict(
            dX, pred_early_stop=True, pred_early_stop_margin=1.0, pred_early_stop_freq=2, raw_score=True
        ).compute()
        rnkvec_dask_local = dask_ranker.to_local().predict(X)

        local_ranker = lgb.LGBMRanker(**params)
        local_ranker.fit(X, y, sample_weight=w, group=g)
        rnkvec_local = local_ranker.predict(X)

        # distributed ranker should be able to rank decently well and should
        # have high rank correlation with scores from serial ranker.
        dcor = spearmanr(rnkvec_dask, y).correlation
        assert dcor > 0.6
        assert spearmanr(rnkvec_dask, rnkvec_local).correlation > 0.8
        assert_eq(rnkvec_dask, rnkvec_dask_local)

        # extra predict() parameters should be passed through correctly
        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_first_iter_raw)

        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_early_stop_raw)

        # pred_leaf values should have the right shape
        # and values that look like valid tree nodes
        pred_leaf_vals = p1_pred_leaf.compute()
        assert pred_leaf_vals.shape == (X.shape[0], dask_ranker.booster_.num_trees())
        assert np.max(pred_leaf_vals) <= params["num_leaves"]
        assert np.min(pred_leaf_vals) >= 0
        assert len(np.unique(pred_leaf_vals)) <= params["num_leaves"]

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_ranker.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", ["array", "dataframe", "dataframe-with-categorical"])
def test_ranker_custom_objective(output, cluster):
    with Client(cluster) as client:
        if output == "dataframe-with-categorical":
            X, y, w, g, dX, dy, dw, dg = _create_data(
                objective="ranking", output=output, group=group_sizes, n_features=1, n_informative=1
            )
        else:
            X, y, w, g, dX, dy, dw, dg = _create_data(objective="ranking", output=output, group=group_sizes)

        # rebalance small dask.Array dataset for better performance.
        if output == "array":
            dX = dX.persist()
            dy = dy.persist()
            dw = dw.persist()
            dg = dg.persist()
            _ = wait([dX, dy, dw, dg])
            client.rebalance()

        params = {
            "random_state": 42,
            "n_estimators": 50,
            "num_leaves": 20,
            "min_child_samples": 1,
            "objective": _objective_least_squares,
        }

        dask_ranker = lgb.DaskLGBMRanker(client=client, time_out=5, tree_learner_type="data", **params)
        dask_ranker = dask_ranker.fit(dX, dy, sample_weight=dw, group=dg)
        rnkvec_dask = dask_ranker.predict(dX).compute()
        dask_ranker_local = dask_ranker.to_local()
        rnkvec_dask_local = dask_ranker_local.predict(X)

        local_ranker = lgb.LGBMRanker(**params)
        local_ranker.fit(X, y, sample_weight=w, group=g)
        rnkvec_local = local_ranker.predict(X)

        # distributed ranker should be able to rank decently well with the least-squares objective
        # and should have high rank correlation with scores from serial ranker.
        assert spearmanr(rnkvec_dask, y).correlation > 0.6
        assert spearmanr(rnkvec_dask, rnkvec_local).correlation > 0.8
        assert_eq(rnkvec_dask, rnkvec_dask_local)

        # function should have been preserved
        assert callable(dask_ranker.objective_)
        assert callable(dask_ranker_local.objective_)


@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("eval_sizes", [[0.5, 1, 1.5], [0]])
@pytest.mark.parametrize("eval_names_prefix", ["specified", None])
def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, cluster):
    if task == "ranking" and output == "scipy_csr_matrix":
        pytest.skip("LGBMRanker is not currently tested on sparse matrices")

    with Client(cluster) as client:
        # Use larger trainset to prevent premature stopping due to zero loss, causing num_trees() < n_estimators.
        # Use small chunk_size to avoid single-worker allocation of eval data partitions.
        n_samples = 1000
        chunk_size = 10
        n_eval_sets = len(eval_sizes)
        eval_set = []
        eval_sample_weight = []
        eval_class_weight = None
        eval_init_score = None

        if eval_names_prefix:
            eval_names = [f"{eval_names_prefix}_{i}" for i in range(len(eval_sizes))]
        else:
            eval_names = None

        X, y, w, g, dX, dy, dw, dg = _create_data(
            objective=task, n_samples=n_samples, output=output, chunk_size=chunk_size
        )

        if task == "ranking":
            eval_metrics = ["ndcg"]
            eval_at = (5, 6)
            eval_metric_names = [f"ndcg@{k}" for k in eval_at]
            eval_group = []
        else:
            # test eval_class_weight, eval_init_score on binary-classification task.
            # Note: objective's default `metric` will be evaluated in evals_result_ in addition to all eval_metrics.
            if task == "binary-classification":
                eval_metrics = ["binary_error", "auc"]
                eval_metric_names = ["binary_logloss", "binary_error", "auc"]
                eval_class_weight = []
                eval_init_score = []
            elif task == "multiclass-classification":
                eval_metrics = ["multi_error"]
                eval_metric_names = ["multi_logloss", "multi_error"]
            elif task == "regression":
                eval_metrics = ["l1"]
                eval_metric_names = ["l2", "l1"]

        # create eval_sets by creating new datasets or copying training data.
        for eval_size in eval_sizes:
            if eval_size == 1:
                y_e = y
                dX_e = dX
                dy_e = dy
                dw_e = dw
                dg_e = dg
            else:
                n_eval_samples = max(chunk_size, int(n_samples * eval_size))
                _, y_e, _, _, dX_e, dy_e, dw_e, dg_e = _create_data(
                    objective=task, n_samples=n_eval_samples, output=output, chunk_size=chunk_size
                )

            eval_set.append((dX_e, dy_e))
            eval_sample_weight.append(dw_e)
            if task == "ranking":
                eval_group.append(dg_e)

            if task == "binary-classification":
                n_neg = np.sum(y_e == 0)
                n_pos = np.sum(y_e == 1)
                eval_class_weight.append({0: n_neg / n_pos, 1: n_pos / n_neg})
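                # constant init_score: the log-odds of the positive-class base rate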
                init_score_value = np.log(np.mean(y_e) / (1 - np.mean(y_e)))
                if "dataframe" in output:
                    d_init_score = dy_e.map_partitions(lambda x, val=init_score_value: pd.Series([val] * x.size))
                else:
                    d_init_score = dy_e.map_blocks(lambda x, val=init_score_value: np.repeat(val, x.size))

                eval_init_score.append(d_init_score)

        fit_trees = 50
        params = {"random_state": 42, "n_estimators": fit_trees, "num_leaves": 2}

        model_factory = task_to_dask_factory[task]
        dask_model = model_factory(client=client, **params)

        fit_params = {
            "X": dX,
            "y": dy,
            "eval_set": eval_set,
            "eval_names": eval_names,
            "eval_sample_weight": eval_sample_weight,
            "eval_init_score": eval_init_score,
            "eval_metric": eval_metrics,
        }
        if task == "ranking":
            fit_params.update({"group": dg, "eval_group": eval_group, "eval_at": eval_at})
        elif task == "binary-classification":
            fit_params.update({"eval_class_weight": eval_class_weight})

        if eval_sizes == [0]:
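            # an eval_size of 0 produces an eval set of a single chunk, so some worker
            # receives no eval partitions and fit() should warn about it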
            with pytest.warns(
                UserWarning,
                match="Worker (.*) was not allocated eval_set data. Therefore evals_result_ and best_score_ data may be unreliable.",
            ):
                dask_model.fit(**fit_params)
        else:
            dask_model = dask_model.fit(**fit_params)

            # total number of trees scales up for ova classifier.
            if task == "multiclass-classification":
                model_trees = fit_trees * dask_model.n_classes_
            else:
                model_trees = fit_trees

            # check that early stopping was not applied.
            assert dask_model.booster_.num_trees() == model_trees
            assert dask_model.best_iteration_ == 0

            # checks that evals_result_ and best_score_ contain expected data and eval_set names.
            evals_result = dask_model.evals_result_
            best_scores = dask_model.best_score_
            assert len(evals_result) == n_eval_sets
            assert len(best_scores) == n_eval_sets

            for eval_name in evals_result:
                assert eval_name in dask_model.best_score_
                if eval_names:
                    assert eval_name in eval_names

                # check that each eval_name and metric exists for all eval sets, allowing for the
                # case when a worker receives a fully-padded eval_set component which is not evaluated.
                if evals_result[eval_name] != {}:
                    for metric in eval_metric_names:
                        assert metric in evals_result[eval_name]
                        assert metric in best_scores[eval_name]
                        assert len(evals_result[eval_name][metric]) == fit_trees


@pytest.mark.parametrize("task", ["binary-classification", "regression", "ranking"])
def test_eval_set_with_custom_eval_metric(task, cluster):
    with Client(cluster) as client:
        n_samples = 1000
        n_eval_samples = int(n_samples * 0.5)
        chunk_size = 10
        output = "array"

        X, y, w, g, dX, dy, dw, dg = _create_data(
            objective=task, n_samples=n_samples, output=output, chunk_size=chunk_size
        )
        _, _, _, _, dX_e, dy_e, _, dg_e = _create_data(
            objective=task, n_samples=n_eval_samples, output=output, chunk_size=chunk_size
        )

        if task == "ranking":
            eval_at = (5, 6)
            eval_metrics = ["ndcg", _constant_metric]
            eval_metric_names = [f"ndcg@{k}" for k in eval_at] + ["constant_metric"]
        elif task == "binary-classification":
            eval_metrics = ["binary_error", "auc", _constant_metric]
            eval_metric_names = ["binary_logloss", "binary_error", "auc", "constant_metric"]
        else:
            eval_metrics = ["l1", _constant_metric]
            eval_metric_names = ["l2", "l1", "constant_metric"]

        fit_trees = 50
        params = {"random_state": 42, "n_estimators": fit_trees, "num_leaves": 2}
        model_factory = task_to_dask_factory[task]
        dask_model = model_factory(client=client, **params)

        eval_set = [(dX_e, dy_e)]
        fit_params = {"X": dX, "y": dy, "eval_set": eval_set, "eval_metric": eval_metrics}
        if task == "ranking":
            fit_params.update({"group": dg, "eval_group": [dg_e], "eval_at": eval_at})

        dask_model = dask_model.fit(**fit_params)

        eval_name = "valid_0"
        evals_result = dask_model.evals_result_
        assert len(evals_result) == 1
        assert eval_name in evals_result

        for metric in eval_metric_names:
            assert metric in evals_result[eval_name]
            assert len(evals_result[eval_name][metric]) == fit_trees

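        # the custom metric should report its fixed value at every iteration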
        np.testing.assert_allclose(evals_result[eval_name]["constant_metric"], 0.708)


@pytest.mark.parametrize("task", tasks)
def test_training_works_if_client_not_provided_or_set_after_construction(task, cluster):
    with Client(cluster) as client:
        _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output="array", group=None)
        model_factory = task_to_dask_factory[task]

        params = {"time_out": 5, "n_estimators": 1, "num_leaves": 2}

        # should be able to use the class without specifying a client
        dask_model = model_factory(**params)
        assert dask_model.client is None
        with pytest.raises(lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"):
            dask_model.client_

        dask_model.fit(dX, dy, group=dg)
        assert dask_model.fitted_
        assert dask_model.client is None
        assert dask_model.client_ == client

        preds = dask_model.predict(dX)
        assert isinstance(preds, da.Array)
        assert dask_model.fitted_
        assert dask_model.client is None
        assert dask_model.client_ == client

        local_model = dask_model.to_local()
        with pytest.raises(AttributeError):
            local_model.client
            local_model.client_

        # should be able to set client after construction
        dask_model = model_factory(**params)
        dask_model.set_params(client=client)
        assert dask_model.client == client

        with pytest.raises(lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"):
            dask_model.client_

        dask_model.fit(dX, dy, group=dg)
        assert dask_model.fitted_
        assert dask_model.client == client
        assert dask_model.client_ == client

        preds = dask_model.predict(dX)
        assert isinstance(preds, da.Array)
        assert dask_model.fitted_
        assert dask_model.client == client
        assert dask_model.client_ == client

        local_model = dask_model.to_local()
        with pytest.raises(AttributeError):
            local_model.client
            local_model.client_


@pytest.mark.parametrize("serializer", ["pickle", "joblib", "cloudpickle"])
@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("set_client", [True, False])
def test_model_and_local_version_are_picklable_whether_or_not_client_set_explicitly(
    serializer, task, set_client, tmp_path, cluster, cluster2
):
    with Client(cluster) as client1:
        # data on cluster1
        X_1, _, _, _, dX_1, dy_1, _, dg_1 = _create_data(objective=task, output="array", group=None)

        with Client(cluster2) as client2:
            # create identical data on cluster2
            X_2, _, _, _, dX_2, dy_2, _, dg_2 = _create_data(objective=task, output="array", group=None)

            model_factory = task_to_dask_factory[task]

            params = {"time_out": 5, "n_estimators": 1, "num_leaves": 2}

            # at this point, the result of default_client() is client2 since it was the most recently
            # created. So setting client to client1 here to test that you can select a non-default client
            assert default_client() == client2
            if set_client:
                params.update({"client": client1})

            # unfitted model should survive pickling round trip, and pickling
            # shouldn't have side effects on the model object
            dask_model = model_factory(**params)
            local_model = dask_model.to_local()
            if set_client:
                assert dask_model.client == client1
            else:
                assert dask_model.client is None

            with pytest.raises(
                lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"
            ):
                dask_model.client_

            assert "client" not in local_model.get_params()
            assert getattr(local_model, "client", None) is None

            tmp_file = tmp_path / "model-1.pkl"
            pickle_obj(obj=dask_model, filepath=tmp_file, serializer=serializer)
            model_from_disk = unpickle_obj(filepath=tmp_file, serializer=serializer)

            local_tmp_file = tmp_path / "local-model-1.pkl"
            pickle_obj(obj=local_model, filepath=local_tmp_file, serializer=serializer)
            local_model_from_disk = unpickle_obj(filepath=local_tmp_file, serializer=serializer)

            assert model_from_disk.client is None

            if set_client:
                assert dask_model.client == client1
            else:
                assert dask_model.client is None

            with pytest.raises(
                lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"
            ):
                dask_model.client_

            # client will always be None after unpickling
            if set_client:
                from_disk_params = model_from_disk.get_params()
                from_disk_params.pop("client", None)
                dask_params = dask_model.get_params()
                dask_params.pop("client", None)
                assert from_disk_params == dask_params
            else:
                assert model_from_disk.get_params() == dask_model.get_params()
            assert local_model_from_disk.get_params() == local_model.get_params()

            # fitted model should survive pickling round trip, and pickling
            # shouldn't have side effects on the model object
            if set_client:
                dask_model.fit(dX_1, dy_1, group=dg_1)
            else:
                dask_model.fit(dX_2, dy_2, group=dg_2)
            local_model = dask_model.to_local()

            assert "client" not in local_model.get_params()
            with pytest.raises(AttributeError):
                local_model.client
                local_model.client_

            tmp_file2 = tmp_path / "model-2.pkl"
            pickle_obj(obj=dask_model, filepath=tmp_file2, serializer=serializer)
            fitted_model_from_disk = unpickle_obj(filepath=tmp_file2, serializer=serializer)

            local_tmp_file2 = tmp_path / "local-model-2.pkl"
            pickle_obj(obj=local_model, filepath=local_tmp_file2, serializer=serializer)
            local_fitted_model_from_disk = unpickle_obj(filepath=local_tmp_file2, serializer=serializer)

            if set_client:
                assert dask_model.client == client1
                assert dask_model.client_ == client1
            else:
                assert dask_model.client is None
                assert dask_model.client_ == default_client()
                assert dask_model.client_ == client2

            assert isinstance(fitted_model_from_disk, model_factory)
            assert fitted_model_from_disk.client is None
            assert fitted_model_from_disk.client_ == default_client()
            assert fitted_model_from_disk.client_ == client2

            # client will always be None after unpickling
            if set_client:
                from_disk_params = fitted_model_from_disk.get_params()
                from_disk_params.pop("client", None)
                dask_params = dask_model.get_params()
                dask_params.pop("client", None)
                assert from_disk_params == dask_params
            else:
                assert fitted_model_from_disk.get_params() == dask_model.get_params()
            assert local_fitted_model_from_disk.get_params() == local_model.get_params()

            if set_client:
                preds_orig = dask_model.predict(dX_1).compute()
                preds_loaded_model = fitted_model_from_disk.predict(dX_1).compute()
                preds_orig_local = local_model.predict(X_1)
                preds_loaded_model_local = local_fitted_model_from_disk.predict(X_1)
            else:
                preds_orig = dask_model.predict(dX_2).compute()
                preds_loaded_model = fitted_model_from_disk.predict(dX_2).compute()
                preds_orig_local = local_model.predict(X_2)
                preds_loaded_model_local = local_fitted_model_from_disk.predict(X_2)

            assert_eq(preds_orig, preds_loaded_model)
            assert_eq(preds_orig_local, preds_loaded_model_local)


def test_warns_and_continues_on_unrecognized_tree_learner(cluster):
    with Client(cluster) as client:
        X = da.random.random((1_000, 10))
        y = da.random.random((1_000, 1))
        dask_regressor = lgb.DaskLGBMRegressor(
            client=client, time_out=5, tree_learner="some-nonsense-value", n_estimators=1, num_leaves=2
        )
        # LightGBM warns about the unrecognized value and falls back to the serial tree learner
        with pytest.warns(UserWarning, match="Parameter tree_learner set to some-nonsense-value"):
            dask_regressor = dask_regressor.fit(X, y)

        assert dask_regressor.fitted_


@pytest.mark.parametrize("tree_learner", ["data_parallel", "voting_parallel"])
def test_training_respects_tree_learner_aliases(tree_learner, cluster):
    with Client(cluster) as client:
        task = "regression"
        _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output="array")
        dask_factory = task_to_dask_factory[task]
        dask_model = dask_factory(client=client, tree_learner=tree_learner, time_out=5, n_estimators=10, num_leaves=15)
        dask_model.fit(dX, dy, sample_weight=dw, group=dg)

        assert dask_model.fitted_
        assert dask_model.get_params()["tree_learner"] == tree_learner


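# LightGBM's distributed C API does not support feature-parallel training,
# so requesting it through the Dask interface should raise a LightGBMError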
def test_error_on_feature_parallel_tree_learner(cluster):
    with Client(cluster) as client:
        X = da.random.random((100, 10), chunks=(50, 10))
        y = da.random.random(100, chunks=50)
        X, y = client.persist([X, y])
        _ = wait([X, y])
        client.rebalance()
        dask_regressor = lgb.DaskLGBMRegressor(
            client=client, time_out=5, tree_learner="feature_parallel", n_estimators=1, num_leaves=2
        )
        with pytest.raises(lgb.basic.LightGBMError, match="Do not support feature parallel in c api"):
            dask_regressor = dask_regressor.fit(X, y)


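# exceptions raised inside Dask tasks should propagate out of lgb.dask._train()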
def test_errors(cluster):
    with Client(cluster) as client:

        def f(part):
            raise Exception("foo")

        df = dd.demo.make_timeseries()
        df = df.map_partitions(f, meta=df._meta)
        with pytest.raises(Exception) as info:
            lgb.dask._train(client=client, data=df, label=df.x, params={}, model_factory=lgb.LGBMClassifier)
        # assert outside the 'with' block: code after the raising call inside
        # pytest.raises never executes
        assert "foo" in str(info.value)


@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("output", data_output)
def test_training_succeeds_even_if_some_workers_do_not_have_any_data(task, output, cluster_three_workers):
    if task == "ranking" and output == "scipy_csr_matrix":
        pytest.skip("LGBMRanker is not currently tested on sparse matrices")

    with Client(cluster_three_workers) as client:
        _, y, _, _, dX, dy, dw, dg = _create_data(
            objective=task,
            output=output,
            group=None,
            n_samples=1_000,
            chunk_size=200,
        )

        dask_model_factory = task_to_dask_factory[task]

        workers = list(client.scheduler_info()["workers"].keys())
        assert len(workers) == 3
        first_two_workers = workers[:2]

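        # pin all data partitions to the first two workers, so the third worker
        # joins training without holding any data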
        dX = client.persist(dX, workers=first_two_workers)
        dy = client.persist(dy, workers=first_two_workers)
        dw = client.persist(dw, workers=first_two_workers)
        wait([dX, dy, dw])

        workers_with_data = set()
        for coll in (dX, dy, dw):
            for with_data in client.who_has(coll).values():
                workers_with_data.update(with_data)
                assert workers[2] not in with_data
        assert len(workers_with_data) == 2

        params = {
            "time_out": 5,
            "random_state": 42,
            "num_leaves": 10,
            "n_estimators": 20,
        }

        # "tree" is an alias of the "tree_learner" parameter
        dask_model = dask_model_factory(tree="data", client=client, **params)
        dask_model.fit(dX, dy, group=dg, sample_weight=dw)
        dask_preds = dask_model.predict(dX).compute()
        if task == "regression":
            score = r2_score(y, dask_preds)
        elif task.endswith("classification"):
            score = accuracy_score(y, dask_preds)
        else:
            score = spearmanr(dask_preds, y).correlation
        assert score > 0.9


@pytest.mark.parametrize("task", tasks)
def test_network_params_not_required_but_respected_if_given(task, listen_port, cluster):
    with Client(cluster) as client:
        _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output="array", chunk_size=10, group=None)

        dask_model_factory = task_to_dask_factory[task]

        # rebalance data to be sure that each worker has a piece of the data
        client.rebalance()

        # model 1 - no network parameters given
        dask_model1 = dask_model_factory(
            n_estimators=5,
            num_leaves=5,
        )
        dask_model1.fit(dX, dy, group=dg)
        assert dask_model1.fitted_
        params = dask_model1.get_params()
        assert "local_listen_port" not in params
        assert "machines" not in params

        # model 2 - machines given
        workers = list(client.scheduler_info()["workers"])
        workers_hostname = _get_workers_hostname(cluster)
        remote_sockets, open_ports = lgb.dask._assign_open_ports_to_workers(client, workers)
        for s in remote_sockets.values():
            s.release()
        dask_model2 = dask_model_factory(
            n_estimators=5,
            num_leaves=5,
            machines=",".join([f"{workers_hostname}:{port}" for port in open_ports.values()]),
        )

        dask_model2.fit(dX, dy, group=dg)
        assert dask_model2.fitted_
        params = dask_model2.get_params()
        assert "local_listen_port" not in params
        assert "machines" in params

        # model 3 - local_listen_port given
        # training should fail because LightGBM will try to use the same
        # port for multiple worker processes on the same machine
        dask_model3 = dask_model_factory(n_estimators=5, num_leaves=5, local_listen_port=listen_port)
        error_msg = "has multiple Dask worker processes running on it"
        with pytest.raises(lgb.basic.LightGBMError, match=error_msg):
            dask_model3.fit(dX, dy, group=dg)


@pytest.mark.parametrize("task", tasks)
def test_machines_should_be_used_if_provided(task, cluster):
    pytest.skip("skipping due to timeout issues discussed in https://github.com/microsoft/LightGBM/issues/5390")
    with Client(cluster) as client:
        _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output="array", chunk_size=10, group=None)

        dask_model_factory = task_to_dask_factory[task]

        # rebalance data to be sure that each worker has a piece of the data
        client.rebalance()

        n_workers = len(client.scheduler_info()["workers"])
        assert n_workers > 1
        workers_hostname = _get_workers_hostname(cluster)
        open_ports = lgb.dask._find_n_open_ports(n_workers)
        dask_model = dask_model_factory(
            n_estimators=5,
            num_leaves=5,
            machines=",".join([f"{workers_hostname}:{port}" for port in open_ports]),
        )

        # test that "machines" is actually respected by creating a socket that uses
        # one of the ports mentioned in "machines"
        error_msg = f"Binding port {open_ports[0]} failed"
        with pytest.raises(lgb.basic.LightGBMError, match=error_msg):
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.bind((workers_hostname, open_ports[0]))
                dask_model.fit(dX, dy, group=dg)

        # The above error leaves a worker waiting
        client.restart()

        # an informative error should be raised if "machines" has duplicates
        one_open_port = lgb.dask._find_n_open_ports(1)[0]
        dask_model.set_params(machines=",".join([f"127.0.0.1:{one_open_port}" for _ in range(n_workers)]))
        with pytest.raises(ValueError, match="Found duplicates in 'machines'"):
            dask_model.fit(dX, dy, group=dg)


@pytest.mark.parametrize(
    "classes",
    [
        (lgb.DaskLGBMClassifier, lgb.LGBMClassifier),
        (lgb.DaskLGBMRegressor, lgb.LGBMRegressor),
        (lgb.DaskLGBMRanker, lgb.LGBMRanker),
    ],
)
def test_dask_classes_and_sklearn_equivalents_have_identical_constructors_except_client_arg(classes):
    dask_spec = inspect.getfullargspec(classes[0])
    sklearn_spec = inspect.getfullargspec(classes[1])
    assert dask_spec.varargs == sklearn_spec.varargs
    assert dask_spec.varkw == sklearn_spec.varkw
    assert dask_spec.kwonlyargs == sklearn_spec.kwonlyargs
    assert dask_spec.kwonlydefaults == sklearn_spec.kwonlydefaults

    # "client" should be the only different, and the final argument
    assert dask_spec.args[:-1] == sklearn_spec.args
    assert dask_spec.defaults[:-1] == sklearn_spec.defaults
    assert dask_spec.args[-1] == "client"
    assert dask_spec.defaults[-1] is None


@pytest.mark.parametrize(
    "methods",
    [
        (lgb.DaskLGBMClassifier.fit, lgb.LGBMClassifier.fit),
        (lgb.DaskLGBMClassifier.predict, lgb.LGBMClassifier.predict),
        (lgb.DaskLGBMClassifier.predict_proba, lgb.LGBMClassifier.predict_proba),
        (lgb.DaskLGBMRegressor.fit, lgb.LGBMRegressor.fit),
        (lgb.DaskLGBMRegressor.predict, lgb.LGBMRegressor.predict),
        (lgb.DaskLGBMRanker.fit, lgb.LGBMRanker.fit),
        (lgb.DaskLGBMRanker.predict, lgb.LGBMRanker.predict),
    ],
)
def test_dask_methods_and_sklearn_equivalents_have_similar_signatures(methods):
    dask_spec = inspect.getfullargspec(methods[0])
    sklearn_spec = inspect.getfullargspec(methods[1])
    dask_params = inspect.signature(methods[0]).parameters
    sklearn_params = inspect.signature(methods[1]).parameters
    assert dask_spec.args == sklearn_spec.args[: len(dask_spec.args)]
    assert dask_spec.varargs == sklearn_spec.varargs
    if sklearn_spec.varkw:
        assert dask_spec.varkw == sklearn_spec.varkw[: len(dask_spec.varkw)]
    assert dask_spec.kwonlyargs == sklearn_spec.kwonlyargs
    assert dask_spec.kwonlydefaults == sklearn_spec.kwonlydefaults
    for param in dask_spec.args:
        error_msg = f"param '{param}' has different default values in the methods"
        assert dask_params[param].default == sklearn_params[param].default, error_msg


@pytest.mark.parametrize("task", tasks)
def test_training_succeeds_when_data_is_dataframe_and_label_is_column_array(task, cluster):
    with Client(cluster):
        _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output="dataframe", group=None)

        model_factory = task_to_dask_factory[task]

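        # reshape the label into a 2-D column array, a shape some pipelines
        # produce, to confirm that fit() accepts it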
        dy = dy.to_dask_array(lengths=True)
        dy_col_array = dy.reshape(-1, 1)
        assert len(dy_col_array.shape) == 2 and dy_col_array.shape[1] == 1

        params = {"n_estimators": 1, "num_leaves": 3, "random_state": 0, "time_out": 5}
        model = model_factory(**params)
        model.fit(dX, dy_col_array, sample_weight=dw, group=dg)
        assert model.fitted_


@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("output", data_output)
def test_init_score(task, output, cluster):
    if task == "ranking" and output == "scipy_csr_matrix":
        pytest.skip("LGBMRanker is not currently tested on sparse matrices")

    with Client(cluster) as client:
        _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output=output, group=None)

        model_factory = task_to_dask_factory[task]

        params = {"n_estimators": 1, "num_leaves": 2, "time_out": 5}
        init_score = random.random()
        size_factor = 1
        if task == "multiclass-classification":
            size_factor = 3  # number of classes

        if output.startswith("dataframe"):
            init_scores = dy.map_partitions(lambda x: pd.DataFrame([[init_score] * size_factor] * x.size))
        else:
            init_scores = dy.map_blocks(lambda x: np.full((x.size, size_factor), init_score))
        model = model_factory(client=client, **params)
        model.fit(dX, dy, sample_weight=dw, init_score=init_scores, group=dg)
        # value of the root node is 0 when init_score is set
        assert model.booster_.trees_to_dataframe()["value"][0] == 0


def sklearn_checks_to_run():
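    # yield only the checks that exist in the installed scikit-learn version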
    check_names = ["check_estimator_get_tags_default_keys", "check_get_params_invariance", "check_set_params"]
    for check_name in check_names:
        check_func = getattr(sklearn_checks, check_name, None)
        if check_func:
            yield check_func


def _tested_estimators():
    for Estimator in [lgb.DaskLGBMClassifier, lgb.DaskLGBMRegressor]:
        yield Estimator()


@pytest.mark.parametrize("estimator", _tested_estimators())
@pytest.mark.parametrize("check", sklearn_checks_to_run())
def test_sklearn_integration(estimator, check, cluster):
    with Client(cluster):
        estimator.set_params(local_listen_port=18000, time_out=5)
        name = type(estimator).__name__
        check(name, estimator)


# this test is separate because it takes a not-yet-constructed estimator
@pytest.mark.parametrize("estimator", list(_tested_estimators()))
def test_parameters_default_constructible(estimator):
    name = estimator.__class__.__name__
    # pass the estimator instance directly; modern scikit-learn checks expect
    # instances rather than classes
    sklearn_checks.check_parameters_default_constructible(name, estimator)


@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("output", data_output)
def test_predict_with_raw_score(task, output, cluster):
    if task == "ranking" and output == "scipy_csr_matrix":
        pytest.skip("LGBMRanker is not currently tested on sparse matrices")

    with Client(cluster) as client:
        _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output=output, group=None)

        model_factory = task_to_dask_factory[task]
        params = {"client": client, "n_estimators": 1, "num_leaves": 2, "time_out": 5, "min_sum_hessian": 0}
        model = model_factory(**params)
        model.fit(dX, dy, group=dg)
        raw_predictions = model.predict(dX, raw_score=True).compute()

        trees_df = model.booster_.trees_to_dataframe()
        leaves_df = trees_df[trees_df.node_depth == 2]
        if task == "multiclass-classification":
            for i in range(model.n_classes_):
                class_df = leaves_df[leaves_df.tree_index == i]
                assert set(raw_predictions[:, i]) == set(class_df["value"])
        else:
            assert set(raw_predictions) == set(leaves_df["value"])

        if task.endswith("classification"):
            pred_proba_raw = model.predict_proba(dX, raw_score=True).compute()
            assert_eq(raw_predictions, pred_proba_raw)


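# quantized training buckets gradients and hessians into a small number of integer
# bins ("num_grad_quant_bins"); distributed training with it enabled should stay
# close to full-precision accuracy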
def test_distributed_quantized_training(cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output="array")

        np.savetxt("data_dask.csv", np.hstack([np.array([y]).T, X]), fmt="%f,%f,%f,%f,%f")

        params = {
            "boosting_type": "gbdt",
            "n_estimators": 50,
            "num_leaves": 31,
            "use_quantized_grad": True,
            "num_grad_quant_bins": 30,
            "quant_train_renew_leaf": True,
            "verbose": -1,
        }

        quant_dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, **params)
        quant_dask_regressor = quant_dask_regressor.fit(dX, dy, sample_weight=dw)
        quant_p1 = quant_dask_regressor.predict(dX)
        quant_rmse = np.sqrt(np.mean((quant_p1.compute() - y) ** 2))

        params["use_quantized_grad"] = False
        dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, **params)
        dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
        p1 = dask_regressor.predict(dX)
        rmse = np.sqrt(np.mean((p1.compute() - y) ** 2))
        # quantized training may lose a little accuracy; allow a small tolerance
        assert quant_rmse < rmse + 7.0