# coding: utf-8
"""Tests for lightgbm.dask module"""

import inspect
import socket
from itertools import groupby
from os import getenv
from platform import machine
from sys import platform
from urllib.parse import urlparse

import pytest
from sklearn.metrics import accuracy_score, r2_score

import lightgbm as lgb

from .utils import sklearn_multiclass_custom_objective

if not platform.startswith("linux"):
    pytest.skip("lightgbm.dask is currently supported in Linux environments", allow_module_level=True)
if machine() != "x86_64":
    pytest.skip("lightgbm.dask tests are currently skipped on some architectures like arm64", allow_module_level=True)
if not lgb.compat.DASK_INSTALLED:
    pytest.skip("Dask is not installed", allow_module_level=True)

import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import sklearn.utils.estimator_checks as sklearn_checks
from dask.array.utils import assert_eq
from dask.distributed import Client, LocalCluster, default_client, wait
from scipy.sparse import csc_matrix, csr_matrix
from scipy.stats import spearmanr
from sklearn.datasets import make_blobs, make_regression

from .utils import make_ranking, pickle_obj, unpickle_obj

tasks = ["binary-classification", "multiclass-classification", "regression", "ranking"]
distributed_training_algorithms = ["data", "voting"]
data_output = ["array", "scipy_csr_matrix", "dataframe", "dataframe-with-categorical"]
boosting_types = ["gbdt", "dart", "goss", "rf"]
group_sizes = [5, 5, 5, 10, 10, 10, 20, 20, 20, 50, 50]
task_to_dask_factory = {
    "regression": lgb.DaskLGBMRegressor,
    "binary-classification": lgb.DaskLGBMClassifier,
    "multiclass-classification": lgb.DaskLGBMClassifier,
    "ranking": lgb.DaskLGBMRanker,
}
task_to_local_factory = {
    "regression": lgb.LGBMRegressor,
    "binary-classification": lgb.LGBMClassifier,
    "multiclass-classification": lgb.LGBMClassifier,
    "ranking": lgb.LGBMRanker,
}

pytestmark = [
    pytest.mark.skipif(getenv("TASK", "") == "mpi", reason="Fails to run with MPI interface"),
    pytest.mark.skipif(getenv("TASK", "") == "gpu", reason="Fails to run with GPU interface"),
    pytest.mark.skipif(getenv("TASK", "") == "cuda", reason="Fails to run with CUDA interface"),
]


@pytest.fixture(scope="module")
def cluster():
    dask_cluster = LocalCluster(n_workers=2, threads_per_worker=2, dashboard_address=None)
    yield dask_cluster
    dask_cluster.close()


@pytest.fixture(scope="module")
def cluster2():
    dask_cluster = LocalCluster(n_workers=2, threads_per_worker=2, dashboard_address=None)
    yield dask_cluster
    dask_cluster.close()


@pytest.fixture(scope="module")
def cluster_three_workers():
    dask_cluster = LocalCluster(n_workers=3, threads_per_worker=1, dashboard_address=None)
    yield dask_cluster
    dask_cluster.close()


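# module-level counter used by the listen_port fixture: each test that requests
# the fixture gets a port 10 higher than the previous one (starting from 13000)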
@pytest.fixture()
def listen_port():
    listen_port.port += 10
    return listen_port.port


listen_port.port = 13000


def _get_workers_hostname(cluster: LocalCluster) -> str:
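    # Dask worker addresses look like "tcp://127.0.0.1:34567"; urlparse() pulls out the host
    # so tests can bind sockets on the same interface the workers use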
    one_worker_address = next(iter(cluster.scheduler_info["workers"]))
    return urlparse(one_worker_address).hostname


def _create_ranking_data(n_samples=100, output="array", chunk_size=50, **kwargs):
    X, y, g = make_ranking(n_samples=n_samples, random_state=42, **kwargs)
    rnd = np.random.RandomState(42)
    w = rnd.rand(X.shape[0]) * 0.01
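    # run-length encode the group ids, e.g. g = [0, 0, 1, 1, 1] -> g_rle = [2, 3]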
    g_rle = np.array([len(list(grp)) for _, grp in groupby(g)])

    if output.startswith("dataframe"):
        # add target, weight, and group to DataFrame so that partitions abide by group boundaries.
        X_df = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(X.shape[1])])
        if output == "dataframe-with-categorical":
            for i in range(5):
                col_name = f"cat_col{i}"
                cat_values = rnd.choice(["a", "b"], X.shape[0])
                cat_series = pd.Series(cat_values, dtype="category")
                X_df[col_name] = cat_series
        X = X_df.copy()
        X_df = X_df.assign(y=y, g=g, w=w)

        # set_index ensures partitions are based on group id.
        # See https://stackoverflow.com/questions/49532824/dask-dataframe-split-partitions-based-on-a-column-or-function.
        X_df.set_index("g", inplace=True)
        dX = dd.from_pandas(X_df, chunksize=chunk_size)

        # separate target, weight from features.
        dy = dX["y"]
        dw = dX["w"]
        dX = dX.drop(columns=["y", "w"])
        dg = dX.index.to_series()

        # encode group identifiers into run-length encoding, the format LightGBMRanker is expecting
        # so that within each partition, sum(g) = n_samples.
        dg = dg.map_partitions(lambda p: p.groupby("g", sort=False).apply(lambda z: z.shape[0]))
    elif output == "array":
        # ranking arrays: one chunk per group. Each chunk must include all columns.
        p = X.shape[1]
        dX, dy, dw, dg = [], [], [], []
        for g_idx, rhs in enumerate(np.cumsum(g_rle)):
            lhs = rhs - g_rle[g_idx]
            dX.append(da.from_array(X[lhs:rhs, :], chunks=(rhs - lhs, p)))
            dy.append(da.from_array(y[lhs:rhs]))
            dw.append(da.from_array(w[lhs:rhs]))
            dg.append(da.from_array(np.array([g_rle[g_idx]])))

        dX = da.concatenate(dX, axis=0)
        dy = da.concatenate(dy, axis=0)
        dw = da.concatenate(dw, axis=0)
        dg = da.concatenate(dg, axis=0)
    else:
        raise ValueError("Ranking data creation only supported for Dask arrays and dataframes")

    return X, y, w, g_rle, dX, dy, dw, dg


def _create_data(objective, n_samples=1_000, output="array", chunk_size=500, **kwargs):
    if objective.endswith("classification"):
        if objective == "binary-classification":
            centers = [[-4, -4], [4, 4]]
        elif objective == "multiclass-classification":
            centers = [[-4, -4], [4, 4], [-4, 4]]
        else:
            raise ValueError(f"Unknown classification task '{objective}'")
        X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=42)
    elif objective == "regression":
        X, y = make_regression(n_samples=n_samples, n_features=4, n_informative=2, random_state=42)
    elif objective == "ranking":
        return _create_ranking_data(n_samples=n_samples, output=output, chunk_size=chunk_size, **kwargs)
    else:
        raise ValueError(f"Unknown objective '{objective}'")
    rnd = np.random.RandomState(42)
    weights = rnd.random(X.shape[0]) * 0.01

    if output == "array":
        dX = da.from_array(X, (chunk_size, X.shape[1]))
        dy = da.from_array(y, chunk_size)
        dw = da.from_array(weights, chunk_size)
    elif output.startswith("dataframe"):
        X_df = pd.DataFrame(X, columns=[f"feature_{i}" for i in range(X.shape[1])])
        if output == "dataframe-with-categorical":
            num_cat_cols = 2
            for i in range(num_cat_cols):
                col_name = f"cat_col{i}"
                cat_values = rnd.choice(["a", "b"], X.shape[0])
                cat_series = pd.Series(cat_values, dtype="category")
                X_df[col_name] = cat_series
                X = np.hstack((X, cat_series.cat.codes.values.reshape(-1, 1)))

            # make one categorical feature relevant to the target
            cat_col_is_a = X_df["cat_col0"] == "a"
            if objective == "regression":
                y = np.where(cat_col_is_a, y, 2 * y)
            elif objective == "binary-classification":
                y = np.where(cat_col_is_a, y, 1 - y)
            elif objective == "multiclass-classification":
                n_classes = 3
                y = np.where(cat_col_is_a, y, (1 + y) % n_classes)
        y_df = pd.Series(y, name="target")
        dX = dd.from_pandas(X_df, chunksize=chunk_size)
        dy = dd.from_pandas(y_df, chunksize=chunk_size)
        dw = dd.from_array(weights, chunksize=chunk_size)
    elif output == "scipy_csr_matrix":
        dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(csr_matrix)
        dy = da.from_array(y, chunks=chunk_size)
        dw = da.from_array(weights, chunk_size)
        X = csr_matrix(X)
    elif output == "scipy_csc_matrix":
        dX = da.from_array(X, chunks=(chunk_size, X.shape[1])).map_blocks(csc_matrix)
        dy = da.from_array(y, chunks=chunk_size)
        dw = da.from_array(weights, chunk_size)
        X = csc_matrix(X)
    else:
        raise ValueError(f"Unknown output type '{output}'")

    return X, y, weights, None, dX, dy, dw, None


def _r2_score(dy_true, dy_pred):
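    # materialize the Dask collections, then compute R^2 the same way sklearn.metrics.r2_score does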
    y_true = dy_true.compute()
    y_pred = dy_pred.compute()
    numerator = ((y_true - y_pred) ** 2).sum(axis=0)
    denominator = ((y_true - y_true.mean(axis=0)) ** 2).sum(axis=0)
    return 1 - numerator / denominator


def _accuracy_score(dy_true, dy_pred):
    y_true = dy_true.compute()
    y_pred = dy_pred.compute()
    return (y_true == y_pred).mean()


def _constant_metric(y_true, y_pred):
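    # custom eval metric that always returns the same value; tests only check that it
    # shows up in evals_result_ with that constant value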
    metric_name = "constant_metric"
    value = 0.708
    is_higher_better = False
    return metric_name, value, is_higher_better


def _objective_least_squares(y_true, y_pred):
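    # gradient and hessian of 0.5 * (y_pred - y_true)^2 with respect to y_pred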
    grad = y_pred - y_true
    hess = np.ones(len(y_true))
    return grad, hess


def _objective_logistic_regression(y_true, y_pred):
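    # binary log-loss on the raw score: p = sigmoid(y_pred), grad = p - y, hess = p * (1 - p)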
    y_pred = 1.0 / (1.0 + np.exp(-y_pred))
    grad = y_pred - y_true
    hess = y_pred * (1.0 - y_pred)
    return grad, hess


@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification"])
@pytest.mark.parametrize("boosting_type", boosting_types)
@pytest.mark.parametrize("tree_learner", distributed_training_algorithms)
def test_classifier(output, task, boosting_type, tree_learner, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective=task, output=output)

        params = {"boosting_type": boosting_type, "tree_learner": tree_learner, "n_estimators": 50, "num_leaves": 31}
        if boosting_type == "rf":
            params.update(
                {
                    "bagging_freq": 1,
                    "bagging_fraction": 0.9,
                }
            )
        elif boosting_type == "goss":
            params["top_rate"] = 0.5

        dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, **params)
        dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw)
        p1 = dask_classifier.predict(dX)
        p1_raw = dask_classifier.predict(dX, raw_score=True).compute()
        p1_first_iter_raw = dask_classifier.predict(dX, start_iteration=0, num_iteration=1, raw_score=True).compute()
        p1_early_stop_raw = dask_classifier.predict(
            dX, pred_early_stop=True, pred_early_stop_margin=1.0, pred_early_stop_freq=2, raw_score=True
        ).compute()
        p1_proba = dask_classifier.predict_proba(dX).compute()
        p1_pred_leaf = dask_classifier.predict(dX, pred_leaf=True)
        p1_local = dask_classifier.to_local().predict(X)
        s1 = _accuracy_score(dy, p1)
        p1 = p1.compute()

        local_classifier = lgb.LGBMClassifier(**params)
        local_classifier.fit(X, y, sample_weight=w)
        p2 = local_classifier.predict(X)
        p2_proba = local_classifier.predict_proba(X)
        s2 = local_classifier.score(X, y)

        if boosting_type == "rf":
            # https://github.com/microsoft/LightGBM/issues/4118
            assert_eq(s1, s2, atol=0.01)
            assert_eq(p1_proba, p2_proba, atol=0.8)
        else:
            assert_eq(s1, s2)
            assert_eq(p1, p2)
            assert_eq(p1, y)
            assert_eq(p2, y)
            assert_eq(p1_proba, p2_proba, atol=0.03)
            assert_eq(p1_local, p2)
            assert_eq(p1_local, y)

        # extra predict() parameters should be passed through correctly
        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_first_iter_raw)

        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_early_stop_raw)

        # pred_leaf values should have the right shape
        # and values that look like valid tree nodes
        pred_leaf_vals = p1_pred_leaf.compute()
        assert pred_leaf_vals.shape == (X.shape[0], dask_classifier.booster_.num_trees())
        assert np.max(pred_leaf_vals) <= params["num_leaves"]
        assert np.min(pred_leaf_vals) >= 0
        assert len(np.unique(pred_leaf_vals)) <= params["num_leaves"]

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_classifier.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", data_output + ["scipy_csc_matrix"])
@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification"])
def test_classifier_pred_contrib(output, task, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective=task, output=output)

        params = {"n_estimators": 10, "num_leaves": 10}

        dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, tree_learner="data", **params)
        dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw)
        preds_with_contrib = dask_classifier.predict(dX, pred_contrib=True)

        local_classifier = lgb.LGBMClassifier(**params)
        local_classifier.fit(X, y, sample_weight=w)
        local_preds_with_contrib = local_classifier.predict(X, pred_contrib=True)

        # shape depends on whether it is binary or multiclass classification
        num_features = dask_classifier.n_features_
        num_classes = dask_classifier.n_classes_
        if num_classes == 2:
            expected_num_cols = num_features + 1
        else:
            expected_num_cols = (num_features + 1) * num_classes

        # in the special case of multi-class classification using scipy sparse matrices,
        # the output of `.predict(..., pred_contrib=True)` is a list of sparse matrices (one per class)
        #
        # since that case is so different than all other cases, check the relevant things here
        # and then return early
        if output.startswith("scipy") and task == "multiclass-classification":
            if output == "scipy_csr_matrix":
                expected_type = csr_matrix
            elif output == "scipy_csc_matrix":
                expected_type = csc_matrix
            else:
                raise ValueError(f"Unrecognized output type: {output}")
            assert isinstance(preds_with_contrib, list)
            assert all(isinstance(arr, da.Array) for arr in preds_with_contrib)
            assert all(isinstance(arr._meta, expected_type) for arr in preds_with_contrib)
            assert len(preds_with_contrib) == num_classes
            assert len(preds_with_contrib) == len(local_preds_with_contrib)
            for i in range(num_classes):
                computed_preds = preds_with_contrib[i].compute()
                assert isinstance(computed_preds, expected_type)
                assert computed_preds.shape[1] == num_classes
                assert computed_preds.shape == local_preds_with_contrib[i].shape
                assert len(np.unique(computed_preds[:, -1])) == 1
                # raw scores will probably be different, but at least check that all predicted classes are the same
                pred_classes = np.argmax(computed_preds.toarray(), axis=1)
                local_pred_classes = np.argmax(local_preds_with_contrib[i].toarray(), axis=1)
                np.testing.assert_array_equal(pred_classes, local_pred_classes)
            return

        preds_with_contrib = preds_with_contrib.compute()
        if output.startswith("scipy"):
            preds_with_contrib = preds_with_contrib.toarray()

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_classifier.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="

        # * shape depends on whether it is binary or multiclass classification
        # * matrix for binary classification is of the form [feature_contrib, base_value],
        #   for multi-class it's [feat_contrib_class1, base_value_class1, feat_contrib_class2, base_value_class2, etc.]
        # * contrib outputs for distributed training are different than from local training, so we can just test
        #   that the output has the right shape and base values are in the right position
        assert preds_with_contrib.shape[1] == expected_num_cols
        assert preds_with_contrib.shape == local_preds_with_contrib.shape

        if num_classes == 2:
            assert len(np.unique(preds_with_contrib[:, num_features])) == 1
        else:
            for i in range(num_classes):
                base_value_col = num_features * (i + 1) + i
                assert len(np.unique(preds_with_contrib[:, base_value_col])) == 1


@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("task", ["binary-classification", "multiclass-classification"])
def test_classifier_custom_objective(output, task, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(
            objective=task,
            output=output,
        )

        params = {
            "n_estimators": 50,
            "num_leaves": 31,
            "verbose": -1,
            "seed": 708,
            "deterministic": True,
            "force_col_wise": True,
        }

        if task == "binary-classification":
            params.update(
                {
                    "objective": _objective_logistic_regression,
                }
            )
        elif task == "multiclass-classification":
            params.update({"objective": sklearn_multiclass_custom_objective, "num_classes": 3})

        dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, tree_learner="data", **params)
        dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw)
        dask_classifier_local = dask_classifier.to_local()
        p1_raw = dask_classifier.predict(dX, raw_score=True).compute()
        p1_raw_local = dask_classifier_local.predict(X, raw_score=True)

        local_classifier = lgb.LGBMClassifier(**params)
        local_classifier.fit(X, y, sample_weight=w)
        p2_raw = local_classifier.predict(X, raw_score=True)

        # with a custom objective, prediction result is a raw score instead of predicted class
        if task == "binary-classification":
            p1_proba = 1.0 / (1.0 + np.exp(-p1_raw))
            p1_class = (p1_proba > 0.5).astype(np.int64)
            p1_proba_local = 1.0 / (1.0 + np.exp(-p1_raw_local))
            p1_class_local = (p1_proba_local > 0.5).astype(np.int64)
            p2_proba = 1.0 / (1.0 + np.exp(-p2_raw))
            p2_class = (p2_proba > 0.5).astype(np.int64)
        elif task == "multiclass-classification":
            p1_proba = np.exp(p1_raw) / np.sum(np.exp(p1_raw), axis=1).reshape(-1, 1)
            p1_class = p1_proba.argmax(axis=1)
            p1_proba_local = np.exp(p1_raw_local) / np.sum(np.exp(p1_raw_local), axis=1).reshape(-1, 1)
            p1_class_local = p1_proba_local.argmax(axis=1)
            p2_proba = np.exp(p2_raw) / np.sum(np.exp(p2_raw), axis=1).reshape(-1, 1)
            p2_class = p2_proba.argmax(axis=1)

        # function should have been preserved
        assert callable(dask_classifier.objective_)
        assert callable(dask_classifier_local.objective_)

        # should correctly classify every sample
        assert_eq(p1_class, y)
        assert_eq(p1_class_local, y)
        assert_eq(p2_class, y)

        # probability estimates should be similar
        assert_eq(p1_proba, p2_proba, atol=0.03)
        assert_eq(p1_proba, p1_proba_local)


def test_machines_to_worker_map_unparseable_host_names():
    workers = {"0.0.0.1:80": {}, "0.0.0.2:80": {}}
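    # these addresses have no scheme (e.g. no "tcp://"), so a host name cannot be parsed from them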
    machines = "0.0.0.1:80,0.0.0.2:80"
    with pytest.raises(ValueError, match="Could not parse host name from worker address '0.0.0.1:80'"):
        lgb.dask._machines_to_worker_map(machines=machines, worker_addresses=workers.keys())


def test_training_does_not_fail_on_port_conflicts(cluster):
    with Client(cluster) as client:
        _, _, _, _, dX, dy, dw, _ = _create_data("binary-classification", output="array")

        lightgbm_default_port = 12400
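        # occupy the port LightGBM would try first so training has to search for a free one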
        workers_hostname = _get_workers_hostname(cluster)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind((workers_hostname, lightgbm_default_port))
            dask_classifier = lgb.DaskLGBMClassifier(client=client, time_out=5, n_estimators=5, num_leaves=5)
            for _ in range(5):
                dask_classifier.fit(
                    X=dX,
                    y=dy,
                    sample_weight=dw,
                )
                assert dask_classifier.booster_


@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("boosting_type", boosting_types)
@pytest.mark.parametrize("tree_learner", distributed_training_algorithms)
def test_regressor(output, boosting_type, tree_learner, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output)

        params = {
            "boosting_type": boosting_type,
            "random_state": 42,
            "num_leaves": 31,
            "n_estimators": 20,
        }
        if boosting_type == "rf":
            params.update(
                {
                    "bagging_freq": 1,
                    "bagging_fraction": 0.9,
                }
            )

        dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, tree=tree_learner, **params)
        dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
        p1 = dask_regressor.predict(dX)
        p1_pred_leaf = dask_regressor.predict(dX, pred_leaf=True)

        s1 = _r2_score(dy, p1)
        p1 = p1.compute()
        p1_raw = dask_regressor.predict(dX, raw_score=True).compute()
        p1_first_iter_raw = dask_regressor.predict(dX, start_iteration=0, num_iteration=1, raw_score=True).compute()
        p1_local = dask_regressor.to_local().predict(X)
        s1_local = dask_regressor.to_local().score(X, y)

        local_regressor = lgb.LGBMRegressor(**params)
        local_regressor.fit(X, y, sample_weight=w)
        s2 = local_regressor.score(X, y)
        p2 = local_regressor.predict(X)

        # Scores should be the same
        assert_eq(s1, s2, atol=0.01)
        assert_eq(s1, s1_local)

        # Predictions should be roughly the same.
        assert_eq(p1, p1_local)

        # pred_leaf values should have the right shape
        # and values that look like valid tree nodes
        pred_leaf_vals = p1_pred_leaf.compute()
        assert pred_leaf_vals.shape == (X.shape[0], dask_regressor.booster_.num_trees())
        assert np.max(pred_leaf_vals) <= params["num_leaves"]
        assert np.min(pred_leaf_vals) >= 0
        assert len(np.unique(pred_leaf_vals)) <= params["num_leaves"]

        assert_eq(p1, y, rtol=0.5, atol=50.0)
        assert_eq(p2, y, rtol=0.5, atol=50.0)

        # extra predict() parameters should be passed through correctly
        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_first_iter_raw)

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_regressor.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", data_output)
def test_regressor_pred_contrib(output, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output)

        params = {"n_estimators": 10, "num_leaves": 10}

        dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, tree_learner="data", **params)
        dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
        preds_with_contrib = dask_regressor.predict(dX, pred_contrib=True).compute()

        local_regressor = lgb.LGBMRegressor(**params)
        local_regressor.fit(X, y, sample_weight=w)
        local_preds_with_contrib = local_regressor.predict(X, pred_contrib=True)

        if output == "scipy_csr_matrix":
            preds_with_contrib = preds_with_contrib.toarray()

        # contrib outputs for distributed training are different than from local training, so we can just test
        # that the output has the right shape and base values are in the right position
        num_features = dX.shape[1]
        assert preds_with_contrib.shape[1] == num_features + 1
        assert preds_with_contrib.shape == local_preds_with_contrib.shape

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_regressor.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("alpha", [0.1, 0.5, 0.9])
def test_regressor_quantile(output, alpha, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output)

        params = {"objective": "quantile", "alpha": alpha, "random_state": 42, "n_estimators": 10, "num_leaves": 10}

        dask_regressor = lgb.DaskLGBMRegressor(client=client, tree_learner_type="data_parallel", **params)
        dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
        p1 = dask_regressor.predict(dX).compute()
        q1 = np.count_nonzero(y < p1) / y.shape[0]

        local_regressor = lgb.LGBMRegressor(**params)
        local_regressor.fit(X, y, sample_weight=w)
        p2 = local_regressor.predict(X)
        q2 = np.count_nonzero(y < p2) / y.shape[0]

        # Quantiles should be right
        np.testing.assert_allclose(q1, alpha, atol=0.2)
        np.testing.assert_allclose(q2, alpha, atol=0.2)

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_regressor.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", data_output)
def test_regressor_custom_objective(output, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output=output)

        params = {"n_estimators": 10, "num_leaves": 10, "objective": _objective_least_squares}

        dask_regressor = lgb.DaskLGBMRegressor(client=client, time_out=5, tree_learner="data", **params)
        dask_regressor = dask_regressor.fit(dX, dy, sample_weight=dw)
        dask_regressor_local = dask_regressor.to_local()
        p1 = dask_regressor.predict(dX)
        p1_local = dask_regressor_local.predict(X)
        s1_local = dask_regressor_local.score(X, y)
        s1 = _r2_score(dy, p1)
        p1 = p1.compute()

        local_regressor = lgb.LGBMRegressor(**params)
        local_regressor.fit(X, y, sample_weight=w)
        p2 = local_regressor.predict(X)
        s2 = local_regressor.score(X, y)

        # function should have been preserved
        assert callable(dask_regressor.objective_)
        assert callable(dask_regressor_local.objective_)

        # Scores should be the same
        assert_eq(s1, s2, atol=0.01)
        assert_eq(s1, s1_local)

        # local and Dask predictions should be the same
        assert_eq(p1, p1_local)

        # predictions should be better than random
        assert_precision = {"rtol": 0.5, "atol": 50.0}
        assert_eq(p1, y, **assert_precision)
        assert_eq(p2, y, **assert_precision)


@pytest.mark.parametrize("output", ["array", "dataframe", "dataframe-with-categorical"])
@pytest.mark.parametrize("group", [None, group_sizes])
@pytest.mark.parametrize("boosting_type", boosting_types)
@pytest.mark.parametrize("tree_learner", distributed_training_algorithms)
def test_ranker(output, group, boosting_type, tree_learner, cluster):
    with Client(cluster) as client:
        if output == "dataframe-with-categorical":
            X, y, w, g, dX, dy, dw, dg = _create_data(
                objective="ranking", output=output, group=group, n_features=1, n_informative=1
            )
        else:
            X, y, w, g, dX, dy, dw, dg = _create_data(objective="ranking", output=output, group=group)

        # rebalance small dask.Array dataset for better performance.
        if output == "array":
            dX = dX.persist()
            dy = dy.persist()
            dw = dw.persist()
            dg = dg.persist()
            _ = wait([dX, dy, dw, dg])
            client.rebalance()

        # use many trees + leaves to overfit, help ensure that Dask data-parallel strategy matches that of
        # serial learner. See https://github.com/microsoft/LightGBM/issues/3292#issuecomment-671288210.
        params = {
            "boosting_type": boosting_type,
            "random_state": 42,
            "n_estimators": 50,
            "num_leaves": 20,
            "min_child_samples": 1,
        }
        if boosting_type == "rf":
            params.update(
                {
                    "bagging_freq": 1,
                    "bagging_fraction": 0.9,
                }
            )

        dask_ranker = lgb.DaskLGBMRanker(client=client, time_out=5, tree_learner_type=tree_learner, **params)
        dask_ranker = dask_ranker.fit(dX, dy, sample_weight=dw, group=dg)
        rnkvec_dask = dask_ranker.predict(dX)
        rnkvec_dask = rnkvec_dask.compute()
        p1_pred_leaf = dask_ranker.predict(dX, pred_leaf=True)
        p1_raw = dask_ranker.predict(dX, raw_score=True).compute()
        p1_first_iter_raw = dask_ranker.predict(dX, start_iteration=0, num_iteration=1, raw_score=True).compute()
        p1_early_stop_raw = dask_ranker.predict(
            dX, pred_early_stop=True, pred_early_stop_margin=1.0, pred_early_stop_freq=2, raw_score=True
        ).compute()
        rnkvec_dask_local = dask_ranker.to_local().predict(X)

        local_ranker = lgb.LGBMRanker(**params)
        local_ranker.fit(X, y, sample_weight=w, group=g)
        rnkvec_local = local_ranker.predict(X)

        # distributed ranker should be able to rank decently well and should
        # have high rank correlation with scores from serial ranker.
        dcor = spearmanr(rnkvec_dask, y).correlation
        assert dcor > 0.6
        assert spearmanr(rnkvec_dask, rnkvec_local).correlation > 0.8
        assert_eq(rnkvec_dask, rnkvec_dask_local)

        # extra predict() parameters should be passed through correctly
        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_first_iter_raw)

        with pytest.raises(AssertionError):
            assert_eq(p1_raw, p1_early_stop_raw)

        # pred_leaf values should have the right shape
        # and values that look like valid tree nodes
        pred_leaf_vals = p1_pred_leaf.compute()
        assert pred_leaf_vals.shape == (X.shape[0], dask_ranker.booster_.num_trees())
        assert np.max(pred_leaf_vals) <= params["num_leaves"]
        assert np.min(pred_leaf_vals) >= 0
        assert len(np.unique(pred_leaf_vals)) <= params["num_leaves"]

        # be sure LightGBM actually used at least one categorical column,
        # and that it was correctly treated as a categorical feature
        if output == "dataframe-with-categorical":
            cat_cols = [col for col in dX.columns if dX.dtypes[col].name == "category"]
            tree_df = dask_ranker.booster_.trees_to_dataframe()
            node_uses_cat_col = tree_df["split_feature"].isin(cat_cols)
            assert node_uses_cat_col.sum() > 0
            assert tree_df.loc[node_uses_cat_col, "decision_type"].unique()[0] == "=="


@pytest.mark.parametrize("output", ["array", "dataframe", "dataframe-with-categorical"])
def test_ranker_custom_objective(output, cluster):
    with Client(cluster) as client:
        if output == "dataframe-with-categorical":
            X, y, w, g, dX, dy, dw, dg = _create_data(
                objective="ranking", output=output, group=group_sizes, n_features=1, n_informative=1
            )
        else:
            X, y, w, g, dX, dy, dw, dg = _create_data(objective="ranking", output=output, group=group_sizes)

        # rebalance small dask.Array dataset for better performance.
        if output == "array":
            dX = dX.persist()
            dy = dy.persist()
            dw = dw.persist()
            dg = dg.persist()
            _ = wait([dX, dy, dw, dg])
            client.rebalance()

        params = {
            "random_state": 42,
            "n_estimators": 50,
            "num_leaves": 20,
            "min_child_samples": 1,
            "objective": _objective_least_squares,
        }

        dask_ranker = lgb.DaskLGBMRanker(client=client, time_out=5, tree_learner_type="data", **params)
        dask_ranker = dask_ranker.fit(dX, dy, sample_weight=dw, group=dg)
        rnkvec_dask = dask_ranker.predict(dX).compute()
        dask_ranker_local = dask_ranker.to_local()
        rnkvec_dask_local = dask_ranker_local.predict(X)

        local_ranker = lgb.LGBMRanker(**params)
        local_ranker.fit(X, y, sample_weight=w, group=g)
        rnkvec_local = local_ranker.predict(X)

        # distributed ranker should be able to rank decently well with the least-squares objective
        # and should have high rank correlation with scores from serial ranker.
        assert spearmanr(rnkvec_dask, y).correlation > 0.6
        assert spearmanr(rnkvec_dask, rnkvec_local).correlation > 0.8
        assert_eq(rnkvec_dask, rnkvec_dask_local)

        # function should have been preserved
        assert callable(dask_ranker.objective_)
        assert callable(dask_ranker_local.objective_)


@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("eval_sizes", [[0.5, 1, 1.5], [0]])
@pytest.mark.parametrize("eval_names_prefix", ["specified", None])
def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, cluster):
    if task == "ranking" and output == "scipy_csr_matrix":
        pytest.skip("LGBMRanker is not currently tested on sparse matrices")

    with Client(cluster) as client:
        # Use larger trainset to prevent premature stopping due to zero loss, causing num_trees() < n_estimators.
        # Use small chunk_size to avoid single-worker allocation of eval data partitions.
        n_samples = 1000
        chunk_size = 10
        n_eval_sets = len(eval_sizes)
        eval_set = []
        eval_sample_weight = []
        eval_class_weight = None
        eval_init_score = None

        if eval_names_prefix:
            eval_names = [f"{eval_names_prefix}_{i}" for i in range(len(eval_sizes))]
        else:
            eval_names = None

        X, y, w, g, dX, dy, dw, dg = _create_data(
            objective=task, n_samples=n_samples, output=output, chunk_size=chunk_size
        )

        if task == "ranking":
            eval_metrics = ["ndcg"]
            eval_at = (5, 6)
            eval_metric_names = [f"ndcg@{k}" for k in eval_at]
            eval_group = []
        else:
            # test eval_class_weight, eval_init_score on binary-classification task.
            # Note: objective's default `metric` will be evaluated in evals_result_ in addition to all eval_metrics.
            if task == "binary-classification":
                eval_metrics = ["binary_error", "auc"]
                eval_metric_names = ["binary_logloss", "binary_error", "auc"]
                eval_class_weight = []
                eval_init_score = []
            elif task == "multiclass-classification":
                eval_metrics = ["multi_error"]
                eval_metric_names = ["multi_logloss", "multi_error"]
            elif task == "regression":
                eval_metrics = ["l1"]
                eval_metric_names = ["l2", "l1"]

        # create eval_sets by creating new datasets or copying training data.
        for eval_size in eval_sizes:
            if eval_size == 1:
                y_e = y
                dX_e = dX
                dy_e = dy
                dw_e = dw
                dg_e = dg
            else:
                n_eval_samples = max(chunk_size, int(n_samples * eval_size))
                _, y_e, _, _, dX_e, dy_e, dw_e, dg_e = _create_data(
                    objective=task, n_samples=n_eval_samples, output=output, chunk_size=chunk_size
                )

            eval_set.append((dX_e, dy_e))
            eval_sample_weight.append(dw_e)
            if task == "ranking":
                eval_group.append(dg_e)

            if task == "binary-classification":
                n_neg = np.sum(y_e == 0)
                n_pos = np.sum(y_e == 1)
                eval_class_weight.append({0: n_neg / n_pos, 1: n_pos / n_neg})
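                # constant init_score: log-odds of the positive class in this eval set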
                init_score_value = np.log(np.mean(y_e) / (1 - np.mean(y_e)))
                if "dataframe" in output:
                    d_init_score = dy_e.map_partitions(lambda x, val=init_score_value: pd.Series([val] * x.size))
                else:
                    d_init_score = dy_e.map_blocks(lambda x, val=init_score_value: np.repeat(val, x.size))

                eval_init_score.append(d_init_score)

        fit_trees = 50
        params = {"random_state": 42, "n_estimators": fit_trees, "num_leaves": 2}

        model_factory = task_to_dask_factory[task]
        dask_model = model_factory(client=client, **params)

        fit_params = {
            "X": dX,
            "y": dy,
            "eval_set": eval_set,
            "eval_names": eval_names,
            "eval_sample_weight": eval_sample_weight,
            "eval_init_score": eval_init_score,
            "eval_metric": eval_metrics,
        }
        if task == "ranking":
            fit_params.update({"group": dg, "eval_group": eval_group, "eval_at": eval_at})
        elif task == "binary-classification":
            fit_params.update({"eval_class_weight": eval_class_weight})

        if eval_sizes == [0]:
            with pytest.warns(
                UserWarning,
                match="Worker (.*) was not allocated eval_set data. Therefore evals_result_ and best_score_ data may be unreliable.",
            ):
                dask_model.fit(**fit_params)
        else:
            dask_model = dask_model.fit(**fit_params)

            # total number of trees scales up for ova classifier.
            if task == "multiclass-classification":
                model_trees = fit_trees * dask_model.n_classes_
            else:
                model_trees = fit_trees

            # check that early stopping was not applied.
            assert dask_model.booster_.num_trees() == model_trees
            assert dask_model.best_iteration_ == 0

            # checks that evals_result_ and best_score_ contain expected data and eval_set names.
            evals_result = dask_model.evals_result_
            best_scores = dask_model.best_score_
            assert len(evals_result) == n_eval_sets
            assert len(best_scores) == n_eval_sets

            for eval_name in evals_result:
                assert eval_name in dask_model.best_score_
                if eval_names:
                    assert eval_name in eval_names

                # check that each eval_name and metric exists for all eval sets, allowing for the
                # case when a worker receives a fully-padded eval_set component which is not evaluated.
                if evals_result[eval_name] != {}:
                    for metric in eval_metric_names:
                        assert metric in evals_result[eval_name]
                        assert metric in best_scores[eval_name]
                        assert len(evals_result[eval_name][metric]) == fit_trees


@pytest.mark.parametrize("task", ["binary-classification", "regression", "ranking"])
def test_eval_set_with_custom_eval_metric(task, cluster):
    with Client(cluster) as client:
        n_samples = 1000
        n_eval_samples = int(n_samples * 0.5)
        chunk_size = 10
        output = "array"

        X, y, w, g, dX, dy, dw, dg = _create_data(
            objective=task, n_samples=n_samples, output=output, chunk_size=chunk_size
        )
        _, _, _, _, dX_e, dy_e, _, dg_e = _create_data(
            objective=task, n_samples=n_eval_samples, output=output, chunk_size=chunk_size
        )

        if task == "ranking":
            eval_at = (5, 6)
            eval_metrics = ["ndcg", _constant_metric]
            eval_metric_names = [f"ndcg@{k}" for k in eval_at] + ["constant_metric"]
        elif task == "binary-classification":
            eval_metrics = ["binary_error", "auc", _constant_metric]
            eval_metric_names = ["binary_logloss", "binary_error", "auc", "constant_metric"]
        else:
            eval_metrics = ["l1", _constant_metric]
            eval_metric_names = ["l2", "l1", "constant_metric"]

        fit_trees = 50
        params = {"random_state": 42, "n_estimators": fit_trees, "num_leaves": 2}
        model_factory = task_to_dask_factory[task]
        dask_model = model_factory(client=client, **params)

        eval_set = [(dX_e, dy_e)]
        fit_params = {"X": dX, "y": dy, "eval_set": eval_set, "eval_metric": eval_metrics}
        if task == "ranking":
            fit_params.update({"group": dg, "eval_group": [dg_e], "eval_at": eval_at})

        dask_model = dask_model.fit(**fit_params)

        eval_name = "valid_0"
        evals_result = dask_model.evals_result_
        assert len(evals_result) == 1
        assert eval_name in evals_result

        for metric in eval_metric_names:
            assert metric in evals_result[eval_name]
            assert len(evals_result[eval_name][metric]) == fit_trees

        np.testing.assert_allclose(evals_result[eval_name]["constant_metric"], 0.708)


@pytest.mark.parametrize("task", tasks)
def test_training_works_if_client_not_provided_or_set_after_construction(task, cluster):
    with Client(cluster) as client:
        _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output="array", group=None)
        model_factory = task_to_dask_factory[task]

        params = {"time_out": 5, "n_estimators": 1, "num_leaves": 2}

        # should be able to use the class without specifying a client
        dask_model = model_factory(**params)
        assert dask_model.client is None
        with pytest.raises(lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"):
            dask_model.client_

        dask_model.fit(dX, dy, group=dg)
        assert dask_model.fitted_
        assert dask_model.client is None
        assert dask_model.client_ == client

        preds = dask_model.predict(dX)
        assert isinstance(preds, da.Array)
        assert dask_model.fitted_
        assert dask_model.client is None
        assert dask_model.client_ == client

        local_model = dask_model.to_local()
        with pytest.raises(AttributeError):
            local_model.client
            local_model.client_

        # should be able to set client after construction
        dask_model = model_factory(**params)
        dask_model.set_params(client=client)
        assert dask_model.client == client

        with pytest.raises(lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"):
            dask_model.client_

        dask_model.fit(dX, dy, group=dg)
        assert dask_model.fitted_
        assert dask_model.client == client
        assert dask_model.client_ == client

        preds = dask_model.predict(dX)
        assert isinstance(preds, da.Array)
        assert dask_model.fitted_
        assert dask_model.client == client
        assert dask_model.client_ == client

        local_model = dask_model.to_local()
        with pytest.raises(AttributeError):
            local_model.client
            local_model.client_


@pytest.mark.parametrize("serializer", ["pickle", "joblib", "cloudpickle"])
@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("set_client", [True, False])
def test_model_and_local_version_are_picklable_whether_or_not_client_set_explicitly(
    serializer, task, set_client, tmp_path, cluster, cluster2
):
    with Client(cluster) as client1:
        # data on cluster1
        X_1, _, _, _, dX_1, dy_1, _, dg_1 = _create_data(objective=task, output="array", group=None)

        with Client(cluster2) as client2:
            # create identical data on cluster2
            X_2, _, _, _, dX_2, dy_2, _, dg_2 = _create_data(objective=task, output="array", group=None)

            model_factory = task_to_dask_factory[task]

            params = {"time_out": 5, "n_estimators": 1, "num_leaves": 2}

            # at this point, the result of default_client() is client2 since it was the most recently
            # created. So setting client to client1 here to test that you can select a non-default client
            assert default_client() == client2
            if set_client:
                params.update({"client": client1})

            # unfitted model should survive pickling round trip, and pickling
            # shouldn't have side effects on the model object
            dask_model = model_factory(**params)
            local_model = dask_model.to_local()
            if set_client:
                assert dask_model.client == client1
            else:
                assert dask_model.client is None

            with pytest.raises(
                lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"
            ):
                dask_model.client_

            assert "client" not in local_model.get_params()
            assert getattr(local_model, "client", None) is None

            tmp_file = tmp_path / "model-1.pkl"
            pickle_obj(obj=dask_model, filepath=tmp_file, serializer=serializer)
            model_from_disk = unpickle_obj(filepath=tmp_file, serializer=serializer)

            local_tmp_file = tmp_path / "local-model-1.pkl"
            pickle_obj(obj=local_model, filepath=local_tmp_file, serializer=serializer)
            local_model_from_disk = unpickle_obj(filepath=local_tmp_file, serializer=serializer)

            assert model_from_disk.client is None

            if set_client:
                assert dask_model.client == client1
            else:
                assert dask_model.client is None

            with pytest.raises(
                lgb.compat.LGBMNotFittedError, match="Cannot access property client_ before calling fit"
            ):
                dask_model.client_

            # client will always be None after unpickling
            if set_client:
                from_disk_params = model_from_disk.get_params()
                from_disk_params.pop("client", None)
                dask_params = dask_model.get_params()
                dask_params.pop("client", None)
                assert from_disk_params == dask_params
            else:
                assert model_from_disk.get_params() == dask_model.get_params()
            assert local_model_from_disk.get_params() == local_model.get_params()

            # fitted model should survive pickling round trip, and pickling
            # shouldn't have side effects on the model object
            if set_client:
                dask_model.fit(dX_1, dy_1, group=dg_1)
            else:
                dask_model.fit(dX_2, dy_2, group=dg_2)
            local_model = dask_model.to_local()

            assert "client" not in local_model.get_params()
            with pytest.raises(AttributeError):
                local_model.client
                local_model.client_

            tmp_file2 = tmp_path / "model-2.pkl"
            pickle_obj(obj=dask_model, filepath=tmp_file2, serializer=serializer)
            fitted_model_from_disk = unpickle_obj(filepath=tmp_file2, serializer=serializer)

            local_tmp_file2 = tmp_path / "local-model-2.pkl"
            pickle_obj(obj=local_model, filepath=local_tmp_file2, serializer=serializer)
            local_fitted_model_from_disk = unpickle_obj(filepath=local_tmp_file2, serializer=serializer)

            if set_client:
                assert dask_model.client == client1
                assert dask_model.client_ == client1
            else:
                assert dask_model.client is None
                assert dask_model.client_ == default_client()
                assert dask_model.client_ == client2

            assert isinstance(fitted_model_from_disk, model_factory)
            assert fitted_model_from_disk.client is None
            assert fitted_model_from_disk.client_ == default_client()
            assert fitted_model_from_disk.client_ == client2

            # client will always be None after unpickling
            if set_client:
                from_disk_params = fitted_model_from_disk.get_params()
                from_disk_params.pop("client", None)
                dask_params = dask_model.get_params()
                dask_params.pop("client", None)
                assert from_disk_params == dask_params
            else:
                assert fitted_model_from_disk.get_params() == dask_model.get_params()
            assert local_fitted_model_from_disk.get_params() == local_model.get_params()

            if set_client:
                preds_orig = dask_model.predict(dX_1).compute()
                preds_loaded_model = fitted_model_from_disk.predict(dX_1).compute()
                preds_orig_local = local_model.predict(X_1)
                preds_loaded_model_local = local_fitted_model_from_disk.predict(X_1)
            else:
                preds_orig = dask_model.predict(dX_2).compute()
                preds_loaded_model = fitted_model_from_disk.predict(dX_2).compute()
                preds_orig_local = local_model.predict(X_2)
                preds_loaded_model_local = local_fitted_model_from_disk.predict(X_2)

            assert_eq(preds_orig, preds_loaded_model)
            assert_eq(preds_orig_local, preds_loaded_model_local)


def test_warns_and_continues_on_unrecognized_tree_learner(cluster):
    with Client(cluster) as client:
        X = da.random.random((1e3, 10))
        y = da.random.random((1e3, 1))
        dask_regressor = lgb.DaskLGBMRegressor(
            client=client, time_out=5, tree_learner="some-nonsense-value", n_estimators=1, num_leaves=2
1189
        )
1190
        with pytest.warns(UserWarning, match="Parameter tree_learner set to some-nonsense-value"):
1191
            dask_regressor = dask_regressor.fit(X, y)
1192

1193
        assert dask_regressor.fitted_
1194


@pytest.mark.parametrize("tree_learner", ["data_parallel", "voting_parallel"])
def test_training_respects_tree_learner_aliases(tree_learner, cluster):
    with Client(cluster) as client:
        task = "regression"
        _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output="array")
        dask_factory = task_to_dask_factory[task]
        dask_model = dask_factory(client=client, tree_learner=tree_learner, time_out=5, n_estimators=10, num_leaves=15)
        dask_model.fit(dX, dy, sample_weight=dw, group=dg)

        assert dask_model.fitted_
        assert dask_model.get_params()["tree_learner"] == tree_learner


def test_error_on_feature_parallel_tree_learner(cluster):
    with Client(cluster) as client:
        X = da.random.random((100, 10), chunks=(50, 10))
        y = da.random.random(100, chunks=50)
        X, y = client.persist([X, y])
        _ = wait([X, y])
        client.rebalance()
        dask_regressor = lgb.DaskLGBMRegressor(
            client=client, time_out=5, tree_learner="feature_parallel", n_estimators=1, num_leaves=2
        )
        with pytest.raises(lgb.basic.LightGBMError, match="Do not support feature parallel in c api"):
            dask_regressor = dask_regressor.fit(X, y)


def test_errors(cluster):
    with Client(cluster) as client:

        def f(part):
            raise Exception("foo")

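        # every partition raises, so computing the data during training should surface the error from f()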
        df = dd.demo.make_timeseries()
        df = df.map_partitions(f, meta=df._meta)
        with pytest.raises(Exception) as info:
            lgb.dask._train(client=client, data=df, label=df.x, params={}, model_factory=lgb.LGBMClassifier)
        assert "foo" in str(info.value)


@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("output", data_output)
def test_training_succeeds_even_if_some_workers_do_not_have_any_data(task, output, cluster_three_workers):
    if task == "ranking" and output == "scipy_csr_matrix":
        pytest.skip("LGBMRanker is not currently tested on sparse matrices")

    with Client(cluster_three_workers) as client:
        _, y, _, _, dX, dy, dw, dg = _create_data(
            objective=task,
            output=output,
            group=None,
            n_samples=1_000,
            chunk_size=200,
        )

        dask_model_factory = task_to_dask_factory[task]

        workers = list(client.scheduler_info()["workers"].keys())
        assert len(workers) == 3
        first_two_workers = workers[:2]

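        # pin all of the training data to the first two workers, so the third worker holds no data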
        dX = client.persist(dX, workers=first_two_workers)
        dy = client.persist(dy, workers=first_two_workers)
        dw = client.persist(dw, workers=first_two_workers)
        wait([dX, dy, dw])

        workers_with_data = set()
        for coll in (dX, dy, dw):
            for with_data in client.who_has(coll).values():
                workers_with_data.update(with_data)
                assert workers[2] not in with_data
        assert len(workers_with_data) == 2

        params = {
            "time_out": 5,
            "random_state": 42,
            "num_leaves": 10,
            "n_estimators": 20,
        }

        dask_model = dask_model_factory(tree="data", client=client, **params)
        dask_model.fit(dX, dy, group=dg, sample_weight=dw)
        dask_preds = dask_model.predict(dX).compute()
        if task == "regression":
            score = r2_score(y, dask_preds)
        elif task.endswith("classification"):
            score = accuracy_score(y, dask_preds)
        else:
            score = spearmanr(dask_preds, y).correlation
        assert score > 0.9


@pytest.mark.parametrize("task", tasks)
def test_network_params_not_required_but_respected_if_given(task, listen_port, cluster):
    with Client(cluster) as client:
        _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output="array", chunk_size=10, group=None)

        dask_model_factory = task_to_dask_factory[task]

        # rebalance data to be sure that each worker has a piece of the data
        client.rebalance()

        # model 1 - no network parameters given
        dask_model1 = dask_model_factory(
            n_estimators=5,
            num_leaves=5,
        )
        dask_model1.fit(dX, dy, group=dg)
        assert dask_model1.fitted_
        params = dask_model1.get_params()
        assert "local_listen_port" not in params
        assert "machines" not in params

        # model 2 - machines given
        workers = list(client.scheduler_info()["workers"])
        workers_hostname = _get_workers_hostname(cluster)
        remote_sockets, open_ports = lgb.dask._assign_open_ports_to_workers(client, workers)
        for s in remote_sockets.values():
            s.release()
        dask_model2 = dask_model_factory(
            n_estimators=5,
            num_leaves=5,
            machines=",".join([f"{workers_hostname}:{port}" for port in open_ports.values()]),
        )

        dask_model2.fit(dX, dy, group=dg)
        assert dask_model2.fitted_
        params = dask_model2.get_params()
        assert "local_listen_port" not in params
        assert "machines" in params

        # model 3 - local_listen_port given
        # training should fail because LightGBM will try to use the same
        # port for multiple worker processes on the same machine
        dask_model3 = dask_model_factory(n_estimators=5, num_leaves=5, local_listen_port=listen_port)
        error_msg = "has multiple Dask worker processes running on it"
        with pytest.raises(lgb.basic.LightGBMError, match=error_msg):
            dask_model3.fit(dX, dy, group=dg)


@pytest.mark.parametrize("task", tasks)
def test_machines_should_be_used_if_provided(task, cluster):
    pytest.skip("skipping due to timeout issues discussed in https://github.com/microsoft/LightGBM/issues/5390")
    with Client(cluster) as client:
        _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output="array", chunk_size=10, group=None)

        dask_model_factory = task_to_dask_factory[task]

        # rebalance data to be sure that each worker has a piece of the data
        client.rebalance()

        n_workers = len(client.scheduler_info()["workers"])
        assert n_workers > 1
        workers_hostname = _get_workers_hostname(cluster)
        open_ports = lgb.dask._find_n_open_ports(n_workers)
        dask_model = dask_model_factory(
            n_estimators=5,
            num_leaves=5,
            machines=",".join([f"{workers_hostname}:{port}" for port in open_ports]),
        )

        # test that "machines" is actually respected by creating a socket that uses
        # one of the ports mentioned in "machines"
        error_msg = f"Binding port {open_ports[0]} failed"
        with pytest.raises(lgb.basic.LightGBMError, match=error_msg):
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.bind((workers_hostname, open_ports[0]))
                dask_model.fit(dX, dy, group=dg)

        # The above error leaves a worker waiting
        client.restart()

        # an informative error should be raised if "machines" has duplicates
        one_open_port = lgb.dask._find_n_open_ports(1)
        dask_model.set_params(machines=",".join([f"127.0.0.1:{one_open_port}" for _ in range(n_workers)]))
        with pytest.raises(ValueError, match="Found duplicates in 'machines'"):
            dask_model.fit(dX, dy, group=dg)


@pytest.mark.parametrize(
    "classes",
    [
        (lgb.DaskLGBMClassifier, lgb.LGBMClassifier),
        (lgb.DaskLGBMRegressor, lgb.LGBMRegressor),
        (lgb.DaskLGBMRanker, lgb.LGBMRanker),
    ],
)
def test_dask_classes_and_sklearn_equivalents_have_identical_constructors_except_client_arg(classes):
    dask_spec = inspect.getfullargspec(classes[0])
    sklearn_spec = inspect.getfullargspec(classes[1])
    assert dask_spec.varargs == sklearn_spec.varargs
    assert dask_spec.varkw == sklearn_spec.varkw
    assert dask_spec.kwonlyargs == sklearn_spec.kwonlyargs
    assert dask_spec.kwonlydefaults == sklearn_spec.kwonlydefaults

    # "client" should be the only difference, and it should be the final argument
    assert dask_spec.args[:-1] == sklearn_spec.args
    assert dask_spec.defaults[:-1] == sklearn_spec.defaults
    assert dask_spec.args[-1] == "client"
    assert dask_spec.defaults[-1] is None


@pytest.mark.parametrize(
    "methods",
    [
        (lgb.DaskLGBMClassifier.fit, lgb.LGBMClassifier.fit),
        (lgb.DaskLGBMClassifier.predict, lgb.LGBMClassifier.predict),
        (lgb.DaskLGBMClassifier.predict_proba, lgb.LGBMClassifier.predict_proba),
        (lgb.DaskLGBMRegressor.fit, lgb.LGBMRegressor.fit),
        (lgb.DaskLGBMRegressor.predict, lgb.LGBMRegressor.predict),
        (lgb.DaskLGBMRanker.fit, lgb.LGBMRanker.fit),
        (lgb.DaskLGBMRanker.predict, lgb.LGBMRanker.predict),
    ],
)
def test_dask_methods_and_sklearn_equivalents_have_similar_signatures(methods):
    dask_spec = inspect.getfullargspec(methods[0])
    sklearn_spec = inspect.getfullargspec(methods[1])
    dask_params = inspect.signature(methods[0]).parameters
    sklearn_params = inspect.signature(methods[1]).parameters
    assert dask_spec.args == sklearn_spec.args[: len(dask_spec.args)]
    assert dask_spec.varargs == sklearn_spec.varargs
    if sklearn_spec.varkw:
        assert dask_spec.varkw == sklearn_spec.varkw[: len(dask_spec.varkw)]
    assert dask_spec.kwonlyargs == sklearn_spec.kwonlyargs
    assert dask_spec.kwonlydefaults == sklearn_spec.kwonlydefaults
    for param in dask_spec.args:
        error_msg = f"param '{param}' has different default values in the methods"
        assert dask_params[param].default == sklearn_params[param].default, error_msg


@pytest.mark.parametrize("task", tasks)
def test_training_succeeds_when_data_is_dataframe_and_label_is_column_array(task, cluster):
    with Client(cluster):
        _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output="dataframe", group=None)

        model_factory = task_to_dask_factory[task]

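        # reshape the label into a 2-D column array (n_samples, 1) to check that fit() accepts that layout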
        dy = dy.to_dask_array(lengths=True)
        dy_col_array = dy.reshape(-1, 1)
        assert len(dy_col_array.shape) == 2 and dy_col_array.shape[1] == 1

        params = {"n_estimators": 1, "num_leaves": 3, "random_state": 0, "time_out": 5}
        model = model_factory(**params)
        model.fit(dX, dy_col_array, sample_weight=dw, group=dg)
        assert model.fitted_


@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("output", data_output)
def test_init_score(task, output, cluster, rng):
    if task == "ranking" and output == "scipy_csr_matrix":
        pytest.skip("LGBMRanker is not currently tested on sparse matrices")

    with Client(cluster) as client:
        _, _, _, _, dX, dy, dw, dg = _create_data(objective=task, output=output, group=None)

        model_factory = task_to_dask_factory[task]

        params = {
            "n_estimators": 1,
            "num_leaves": 2,
            "time_out": 5,
            "seed": 708,
            "deterministic": True,
            "force_row_wise": True,
            "num_thread": 1,
        }
        num_classes = 1
        if task == "multiclass-classification":
            num_classes = 3

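        # build random init_score values with one column per class, partitioned/chunked like the label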
        if output.startswith("dataframe"):
            init_scores = dy.map_partitions(lambda x: pd.DataFrame(rng.uniform(size=(x.size, num_classes))))
        else:
            init_scores = dy.map_blocks(lambda x: rng.uniform(size=(x.size, num_classes)))

        model = model_factory(client=client, **params)
        model.fit(dX, dy, sample_weight=dw, group=dg)
        pred = model.predict(dX, raw_score=True)

        model_init_score = model_factory(client=client, **params)
        model_init_score.fit(dX, dy, sample_weight=dw, init_score=init_scores, group=dg)
        pred_init_score = model_init_score.predict(dX, raw_score=True)

        # check if init score changes predictions
        with pytest.raises(AssertionError):
            assert_eq(pred, pred_init_score)


def sklearn_checks_to_run():
    check_names = ["check_estimator_get_tags_default_keys", "check_get_params_invariance", "check_set_params"]
    for check_name in check_names:
        check_func = getattr(sklearn_checks, check_name, None)
        if check_func:
            yield check_func


def _tested_estimators():
    for Estimator in [lgb.DaskLGBMClassifier, lgb.DaskLGBMRegressor]:
        yield Estimator()


@pytest.mark.parametrize("estimator", _tested_estimators())
@pytest.mark.parametrize("check", sklearn_checks_to_run())
def test_sklearn_integration(estimator, check, cluster):
    with Client(cluster):
        estimator.set_params(local_listen_port=18000, time_out=5)
        name = type(estimator).__name__
        check(name, estimator)


# this test is separate because it takes a not-yet-constructed estimator
@pytest.mark.parametrize("estimator", list(_tested_estimators()))
def test_parameters_default_constructible(estimator):
    name = estimator.__class__.__name__
    Estimator = estimator
    sklearn_checks.check_parameters_default_constructible(name, Estimator)


@pytest.mark.parametrize("task", tasks)
@pytest.mark.parametrize("output", data_output)
def test_predict_with_raw_score(task, output, cluster):
    if task == "ranking" and output == "scipy_csr_matrix":
        pytest.skip("LGBMRanker is not currently tested on sparse matrices")

    with Client(cluster) as client:
        _, _, _, _, dX, dy, _, dg = _create_data(objective=task, output=output, group=None)

        model_factory = task_to_dask_factory[task]
        params = {"client": client, "n_estimators": 1, "num_leaves": 2, "time_out": 5, "min_sum_hessian": 0}
        model = model_factory(**params)
        model.fit(dX, dy, group=dg)
        raw_predictions = model.predict(dX, raw_score=True).compute()

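        # each fitted tree is a stump (num_leaves=2), so every raw prediction should equal one of its
        # leaf values; node_depth == 2 selects exactly those leaf nodes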
        trees_df = model.booster_.trees_to_dataframe()
        leaves_df = trees_df[trees_df.node_depth == 2]
        if task == "multiclass-classification":
            for i in range(model.n_classes_):
                class_df = leaves_df[leaves_df.tree_index == i]
                assert set(raw_predictions[:, i]) == set(class_df["value"])
        else:
            assert set(raw_predictions) == set(leaves_df["value"])

        if task.endswith("classification"):
            pred_proba_raw = model.predict_proba(dX, raw_score=True).compute()
            assert_eq(raw_predictions, pred_proba_raw)


@pytest.mark.parametrize("output", data_output)
@pytest.mark.parametrize("use_init_score", [False, True])
def test_predict_stump(output, use_init_score, cluster, rng):
    with Client(cluster) as client:
        _, _, _, _, dX, dy, _, _ = _create_data(objective="binary-classification", n_samples=1_000, output=output)

        params = {"objective": "binary", "n_estimators": 5, "min_data_in_leaf": 1_000}

        if not use_init_score:
            init_scores = None
        elif output.startswith("dataframe"):
            init_scores = dy.map_partitions(lambda x: pd.DataFrame(rng.uniform(size=x.size)))
        else:
            init_scores = dy.map_blocks(lambda x: rng.uniform(size=x.size))

        model = lgb.DaskLGBMClassifier(client=client, **params)
        model.fit(dX, dy, init_score=init_scores)
        preds_1 = model.predict(dX, raw_score=True, num_iteration=1).compute()
        preds_all = model.predict(dX, raw_score=True).compute()

        if use_init_score:
            # if init_score was provided, a model of stumps should predict all 0s
            all_zeroes = np.full_like(preds_1, fill_value=0.0)
            assert_eq(preds_1, all_zeroes)
            assert_eq(preds_all, all_zeroes)
        else:
            # if init_score was not provided, prediction for a model of stumps should be
            # the "average" of the labels
            y_avg = np.log(dy.mean() / (1.0 - dy.mean()))
            assert_eq(preds_1, np.full_like(preds_1, fill_value=y_avg))
            assert_eq(preds_all, np.full_like(preds_all, fill_value=y_avg))


def test_distributed_quantized_training(tmp_path, cluster):
    with Client(cluster) as client:
        X, y, w, _, dX, dy, dw, _ = _create_data(objective="regression", output="array")

        np.savetxt(tmp_path / "data_dask.csv", np.hstack([np.array([y]).T, X]), fmt="%f,%f,%f,%f,%f")

        params = {
            "boosting_type": "gbdt",
            "n_estimators": 50,
            "num_leaves": 31,
            "use_quantized_grad": True,
            "num_grad_quant_bins": 30,
            "quant_train_renew_leaf": True,
            "verbose": -1,
        }

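        # train once with gradient quantization enabled, then once without it (below), and check that
        # the quantized model's RMSE stays within a loose tolerance of the full-precision baseline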
        quant_dask_classifier = lgb.DaskLGBMRegressor(client=client, time_out=5, **params)
        quant_dask_classifier = quant_dask_classifier.fit(dX, dy, sample_weight=dw)
        quant_p1 = quant_dask_classifier.predict(dX)
        quant_rmse = np.sqrt(np.mean((quant_p1.compute() - y) ** 2))

        params["use_quantized_grad"] = False
        dask_classifier = lgb.DaskLGBMRegressor(client=client, time_out=5, **params)
        dask_classifier = dask_classifier.fit(dX, dy, sample_weight=dw)
        p1 = dask_classifier.predict(dX)
        rmse = np.sqrt(np.mean((p1.compute() - y) ** 2))
        assert quant_rmse < rmse + 7.0