# coding: utf-8
"""Distributed training with LightGBM and dask.distributed.

This module enables you to perform distributed training with LightGBM on
dask.Array and dask.DataFrame collections.

It is based on dask-lightgbm, which was based on dask-xgboost.
"""

import operator
import socket
from collections import defaultdict
from copy import deepcopy
from enum import Enum, auto
from functools import partial
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union
from urllib.parse import urlparse

import numpy as np
import scipy.sparse as ss

from .basic import LightGBMError, _choose_param_value, _ConfigAliases, _log_info, _log_warning
from .compat import (
    DASK_INSTALLED,
    PANDAS_INSTALLED,
    SKLEARN_INSTALLED,
    Client,
    Future,
    LGBMNotFittedError,
    concat,
    dask_Array,
    dask_array_from_delayed,
    dask_bag_from_delayed,
    dask_DataFrame,
    dask_Series,
    default_client,
    delayed,
    pd_DataFrame,
    pd_Series,
    wait,
)
from .sklearn import (
    LGBMClassifier,
    LGBMModel,
    LGBMRanker,
    LGBMRegressor,
    _LGBM_ScikitCustomObjectiveFunction,
    _LGBM_ScikitEvalMetricType,
    _lgbmmodel_doc_custom_eval_note,
    _lgbmmodel_doc_fit,
    _lgbmmodel_doc_predict,
)

__all__ = [
    "DaskLGBMClassifier",
    "DaskLGBMRanker",
    "DaskLGBMRegressor",
]

_DaskCollection = Union[dask_Array, dask_DataFrame, dask_Series]
_DaskMatrixLike = Union[dask_Array, dask_DataFrame]
_DaskVectorLike = Union[dask_Array, dask_Series]
_DaskPart = Union[np.ndarray, pd_DataFrame, pd_Series, ss.spmatrix]
_PredictionDtype = Union[Type[np.float32], Type[np.float64], Type[np.int32], Type[np.int64]]


class _RemoteSocket:
    def acquire(self) -> int:
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
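        # binding to port 0 makes the OS pick a free port; the socket is held
        # open to reserve that port until training is ready to use it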
        self.socket.bind(("", 0))
        return self.socket.getsockname()[1]

    def release(self) -> None:
        self.socket.close()


def _acquire_port() -> Tuple[_RemoteSocket, int]:
    s = _RemoteSocket()
    port = s.acquire()
    return s, port


class _DatasetNames(Enum):
    """Placeholder names used by lightgbm.dask internals to say 'also evaluate the training data'.

    Avoid duplicating the training data when the validation set refers to elements of training data.
    """

    TRAINSET = auto()
    SAMPLE_WEIGHT = auto()
    INIT_SCORE = auto()
    GROUP = auto()


def _get_dask_client(client: Optional[Client]) -> Client:
    """Choose a Dask client to use.

    Parameters
    ----------
    client : dask.distributed.Client or None
        Dask client.

    Returns
    -------
    client : dask.distributed.Client
        A Dask client.
    """
    if client is None:
        return default_client()
    else:
        return client


def _assign_open_ports_to_workers(
    client: Client,
    workers: List[str],
) -> Tuple[Dict[str, Future], Dict[str, int]]:
    """Assign an open port to each worker.

    Returns
    -------
    worker_to_socket_future: dict
        mapping from worker address to a future pointing to the remote socket.
    worker_to_port: dict
        mapping from worker address to an open port in the worker's host.
    """
    # Acquire port in worker
    worker_to_future = {}
    for worker in workers:
        worker_to_future[worker] = client.submit(
            _acquire_port,
            workers=[worker],
            allow_other_workers=False,
            pure=False,
        )

    # schedule futures to retrieve each element of the tuple
    worker_to_socket_future = {}
    worker_to_port_future = {}
    for worker, socket_future in worker_to_future.items():
        worker_to_socket_future[worker] = client.submit(operator.itemgetter(0), socket_future)
        worker_to_port_future[worker] = client.submit(operator.itemgetter(1), socket_future)

    # retrieve ports
    worker_to_port = client.gather(worker_to_port_future)

    return worker_to_socket_future, worker_to_port


def _concat(seq: List[_DaskPart]) -> _DaskPart:
    if isinstance(seq[0], np.ndarray):
        return np.concatenate(seq, axis=0)
    elif isinstance(seq[0], (pd_DataFrame, pd_Series)):
        return concat(seq, axis=0)
    elif isinstance(seq[0], ss.spmatrix):
        return ss.vstack(seq, format="csr")
    else:
        raise TypeError(
            f"Data must be one of: numpy arrays, pandas dataframes, sparse matrices (from scipy). Got {type(seq[0]).__name__}."
        )


def _remove_list_padding(*args: Any) -> List[List[Any]]:
    return [[z for z in arg if z is not None] for arg in args]


def _pad_eval_names(lgbm_model: LGBMModel, required_names: List[str]) -> LGBMModel:
    """Append missing (key, value) pairs to a LightGBM model's evals_result_ and best_score_ OrderedDict attrs based on a set of required eval_set names.

    Allows users to rely on expected eval_set names being present when fitting DaskLGBM estimators with ``eval_set``.
    """
    for eval_name in required_names:
        if eval_name not in lgbm_model.evals_result_:
            lgbm_model.evals_result_[eval_name] = {}
        if eval_name not in lgbm_model.best_score_:
            lgbm_model.best_score_[eval_name] = {}

    return lgbm_model


def _train_part(
    *,
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    list_of_parts: List[Dict[str, _DaskPart]],
    machines: str,
    local_listen_port: int,
    num_machines: int,
    return_model: bool,
    time_out: int,
    remote_socket: _RemoteSocket,
    **kwargs: Any,
) -> Optional[LGBMModel]:
    network_params = {
        "machines": machines,
        "local_listen_port": local_listen_port,
        "time_out": time_out,
        "num_machines": num_machines,
    }
    params.update(network_params)

    is_ranker = issubclass(model_factory, LGBMRanker)

    # Concatenate many parts into one
    data = _concat([x["data"] for x in list_of_parts])
    label = _concat([x["label"] for x in list_of_parts])

    if "weight" in list_of_parts[0]:
        weight = _concat([x["weight"] for x in list_of_parts])
    else:
        weight = None

    if "group" in list_of_parts[0]:
        group = _concat([x["group"] for x in list_of_parts])
    else:
        group = None

    if "init_score" in list_of_parts[0]:
        init_score = _concat([x["init_score"] for x in list_of_parts])
    else:
        init_score = None

    # construct local eval_set data.
    n_evals = max(len(x.get("eval_set", [])) for x in list_of_parts)
    eval_names = kwargs.pop("eval_names", None)
    eval_class_weight = kwargs.get("eval_class_weight")
    local_eval_set = None
    local_eval_names = None
    local_eval_sample_weight = None
    local_eval_init_score = None
    local_eval_group = None

    if n_evals:
        has_eval_sample_weight = any(x.get("eval_sample_weight") is not None for x in list_of_parts)
        has_eval_init_score = any(x.get("eval_init_score") is not None for x in list_of_parts)

        local_eval_set = []
        evals_result_names = []
        if has_eval_sample_weight:
            local_eval_sample_weight = []
        if has_eval_init_score:
            local_eval_init_score = []
        if is_ranker:
            local_eval_group = []

        # store indices of eval_set components that were not contained within local parts.
        missing_eval_component_idx = []

        # consolidate parts of each individual eval component.
        for i in range(n_evals):
            x_e = []
            y_e = []
            w_e = []
            init_score_e = []
            g_e = []
            for part in list_of_parts:
                if not part.get("eval_set"):
                    continue

                # require that eval_name exists in evaluated result data in case dropped due to padding.
                # in distributed training the 'training' eval_set is not detected, will have name 'valid_<index>'.
                if eval_names:
                    evals_result_name = eval_names[i]
                else:
                    evals_result_name = f"valid_{i}"

                eval_set = part["eval_set"][i]
                if eval_set is _DatasetNames.TRAINSET:
                    x_e.append(part["data"])
                    y_e.append(part["label"])
                else:
                    x_e.extend(eval_set[0])
                    y_e.extend(eval_set[1])

                if evals_result_name not in evals_result_names:
                    evals_result_names.append(evals_result_name)

                eval_weight = part.get("eval_sample_weight")
                if eval_weight:
                    if eval_weight[i] is _DatasetNames.SAMPLE_WEIGHT:
                        w_e.append(part["weight"])
                    else:
                        w_e.extend(eval_weight[i])

                eval_init_score = part.get("eval_init_score")
                if eval_init_score:
                    if eval_init_score[i] is _DatasetNames.INIT_SCORE:
                        init_score_e.append(part["init_score"])
                    else:
                        init_score_e.extend(eval_init_score[i])

                eval_group = part.get("eval_group")
                if eval_group:
                    if eval_group[i] is _DatasetNames.GROUP:
                        g_e.append(part["group"])
                    else:
                        g_e.extend(eval_group[i])

            # filter padding from eval parts then _concat each eval_set component.
            x_e, y_e, w_e, init_score_e, g_e = _remove_list_padding(x_e, y_e, w_e, init_score_e, g_e)
            if x_e:
                local_eval_set.append((_concat(x_e), _concat(y_e)))
            else:
                missing_eval_component_idx.append(i)
                continue

            if w_e:
                local_eval_sample_weight.append(_concat(w_e))
            if init_score_e:
                local_eval_init_score.append(_concat(init_score_e))
            if g_e:
                local_eval_group.append(_concat(g_e))

        # reconstruct eval_set fit args/kwargs depending on which components of eval_set are on worker.
        eval_component_idx = [i for i in range(n_evals) if i not in missing_eval_component_idx]
        if eval_names:
            local_eval_names = [eval_names[i] for i in eval_component_idx]
        if eval_class_weight:
            kwargs["eval_class_weight"] = [eval_class_weight[i] for i in eval_component_idx]

    model = model_factory(**params)
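    # release the reserved port right before fitting, so LightGBM's own
    # networking setup can bind to it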
    if remote_socket is not None:
        remote_socket.release()
    try:
        if is_ranker:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                group=group,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_group=local_eval_group,
                eval_names=local_eval_names,
                **kwargs,
            )
        else:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_names=local_eval_names,
                **kwargs,
            )

    finally:
        if getattr(model, "fitted_", False):
            model.booster_.free_network()

    if n_evals:
        # ensure that expected keys for evals_result_ and best_score_ exist regardless of padding.
        model = _pad_eval_names(model, required_names=evals_result_names)

    return model if return_model else None


def _split_to_parts(data: _DaskCollection, is_matrix: bool) -> List[_DaskPart]:
    parts = data.to_delayed()
    if isinstance(parts, np.ndarray):
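        # to_delayed() returns a 2-D grid of delayed chunks for matrix-like
        # collections; training assumes chunking along rows only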
        if is_matrix:
            assert parts.shape[1] == 1
        else:
            assert parts.ndim == 1 or parts.shape[1] == 1
        parts = parts.flatten().tolist()
    return parts


def _machines_to_worker_map(machines: str, worker_addresses: Iterable[str]) -> Dict[str, int]:
    """Create a worker_map from machines list.

    Given ``machines`` and a list of Dask worker addresses, return a mapping where the keys are
    ``worker_addresses`` and the values are ports from ``machines``.

    Parameters
    ----------
    machines : str
        A comma-delimited list of workers, of the form ``ip1:port,ip2:port``.
    worker_addresses : list of str
        An iterable of Dask worker addresses, of the form ``{protocol}{hostname}:{port}``, where ``port`` is the port Dask's scheduler uses to talk to that worker.

    Returns
    -------
    result : Dict[str, int]
        Dictionary where keys are worker addresses in the form expected by Dask and values are a port for LightGBM to use.
    """
    machine_addresses = machines.split(",")

    if len(set(machine_addresses)) != len(machine_addresses):
        raise ValueError(
            f"Found duplicates in 'machines' ({machines}). Each entry in 'machines' must be a unique IP-port combination."
        )

    machine_to_port = defaultdict(set)
    for address in machine_addresses:
        host, port = address.split(":")
        machine_to_port[host].add(int(port))

    out = {}
    for address in worker_addresses:
        worker_host = urlparse(address).hostname
        if not worker_host:
            raise ValueError(f"Could not parse host name from worker address '{address}'")
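        # each Dask worker process on a host claims one of that host's declared ports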
        out[address] = machine_to_port[worker_host].pop()

    return out


def _train(
    *,
    client: Client,
    data: _DaskMatrixLike,
    label: _DaskCollection,
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    sample_weight: Optional[_DaskVectorLike] = None,
    init_score: Optional[_DaskCollection] = None,
    group: Optional[_DaskVectorLike] = None,
    eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
    eval_names: Optional[List[str]] = None,
    eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
    eval_class_weight: Optional[List[Union[dict, str]]] = None,
    eval_init_score: Optional[List[_DaskCollection]] = None,
    eval_group: Optional[List[_DaskVectorLike]] = None,
    eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
    eval_at: Optional[Union[List[int], Tuple[int, ...]]] = None,
    **kwargs: Any,
) -> LGBMModel:
    """Inner train routine.

    Parameters
    ----------
    client : dask.distributed.Client
        Dask client.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    label : Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]
        The target values (class labels in classification, real numbers in regression).
    params : dict
        Parameters passed to constructor of the local underlying model.
    model_factory : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Class of the local underlying model.
    sample_weight : Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)
        Weights of training data. Weights should be non-negative.
    init_score : Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)
        Init score of training data.
    group : Dask Array or Dask Series or None, optional (default=None)
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list of (X, y) tuples of Dask data collections, or None, optional (default=None)
        List of (X, y) tuple pairs to use as validation sets.
        Note that not all workers may receive chunks of every eval set within ``eval_set``. When the returned
        lightgbm estimator is not trained using any chunks of a particular eval set, its corresponding component
        of ``evals_result_`` and ``best_score_`` will be empty dictionaries.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : list of Dask Array or Dask Series, or None, optional (default=None)
        Weights for each validation set in eval_set. Weights should be non-negative.
    eval_class_weight : list of dict or str, or None, optional (default=None)
        Class weights, one dict or str for each validation set in eval_set.
    eval_init_score : list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)
        Initial model score for each validation set in eval_set.
    eval_group : list of Dask Array or Dask Series, or None, optional (default=None)
        Group/query for each validation set in eval_set.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the Dask model parameters (or inferred from the objective) will be evaluated and used as well.
        Default: 'l2' for DaskLGBMRegressor, 'binary(multi)_logloss' for DaskLGBMClassifier, 'ndcg' for DaskLGBMRanker.
    eval_at : list or tuple of int, optional (default=None)
        The evaluation positions of the specified ranking metric.
    **kwargs
        Other parameters passed to ``fit`` method of the local underlying model.

    Returns
    -------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Returns fitted underlying model.

    Note
    ----

    This method handles setting up the following network parameters based on information
    about the Dask cluster referenced by ``client``.

    * ``local_listen_port``: port that each LightGBM worker opens a listening socket on,
            to accept connections from other workers. This can differ from LightGBM worker
            to LightGBM worker, but does not have to.
    * ``machines``: a comma-delimited list of all workers in the cluster, in the
            form ``ip:port,ip:port``. If running multiple Dask workers on the same host, use different
            ports for each worker. For example, for ``LocalCluster(n_workers=3)``, you might
            pass ``"127.0.0.1:12400,127.0.0.1:12401,127.0.0.1:12402"``.
    * ``num_machines``: number of LightGBM workers.
    * ``timeout``: time in minutes to wait before closing unused sockets.

    The default behavior of this function is to generate ``machines`` from the list of
    Dask workers which hold some piece of the training data, and to search for an open
    port on each worker to be used as ``local_listen_port``.

    If ``machines`` is provided explicitly in ``params``, this function uses the hosts
    and ports in that list directly, and does not do any searching. This means that if
    any of the Dask workers are missing from the list or any of those ports are not free
    when training starts, training will fail.

    If ``local_listen_port`` is provided in ``params`` and ``machines`` is not, this function
    constructs ``machines`` from the list of Dask workers which hold some piece of the
    training data, assuming that each one will use the same ``local_listen_port``.
    """
    params = deepcopy(params)

    # capture whether local_listen_port or its aliases were provided
    listen_port_in_params = any(alias in params for alias in _ConfigAliases.get("local_listen_port"))

    # capture whether machines or its aliases were provided
    machines_in_params = any(alias in params for alias in _ConfigAliases.get("machines"))

    params = _choose_param_value(
        main_param_name="tree_learner",
        params=params,
        default_value="data",
    )
    allowed_tree_learners = {
        "data",
        "data_parallel",
        "feature",
        "feature_parallel",
        "voting",
        "voting_parallel",
    }
    if params["tree_learner"] not in allowed_tree_learners:
        _log_warning(
            f'Parameter tree_learner set to {params["tree_learner"]}, which is not allowed. Using "data" as default'
        )
        params["tree_learner"] = "data"

    # Some passed-in parameters can be removed:
    #   * 'num_machines': set automatically from Dask worker list
    #   * 'num_threads': overridden to match nthreads on each Dask process
    for param_alias in _ConfigAliases.get("num_machines", "num_threads"):
        if param_alias in params:
            _log_warning(f"Parameter {param_alias} will be ignored.")
            params.pop(param_alias)

    # Split arrays/dataframes into parts. Arrange parts into dicts to enforce co-locality
    data_parts = _split_to_parts(data=data, is_matrix=True)
    label_parts = _split_to_parts(data=label, is_matrix=False)
    parts = [{"data": x, "label": y} for (x, y) in zip(data_parts, label_parts)]
    n_parts = len(parts)

    if sample_weight is not None:
        weight_parts = _split_to_parts(data=sample_weight, is_matrix=False)
        for i in range(n_parts):
            parts[i]["weight"] = weight_parts[i]

    if group is not None:
        group_parts = _split_to_parts(data=group, is_matrix=False)
        for i in range(n_parts):
            parts[i]["group"] = group_parts[i]

    if init_score is not None:
        init_score_parts = _split_to_parts(data=init_score, is_matrix=False)
        for i in range(n_parts):
            parts[i]["init_score"] = init_score_parts[i]

    # eval_set will be re-constructed into smaller lists of (X, y) tuples, where
    # X and y are each delayed sub-lists of original eval dask Collections.
    if eval_set:
        # find maximum number of parts in an individual eval set so that we can
        # pad eval sets when they come in different sizes.
        n_largest_eval_parts = max(x[0].npartitions for x in eval_set)

        eval_sets: Dict[
            int, List[Union[_DatasetNames, Tuple[List[Optional[_DaskMatrixLike]], List[Optional[_DaskVectorLike]]]]]
        ] = defaultdict(list)
        if eval_sample_weight:
            eval_sample_weights: Dict[int, List[Union[_DatasetNames, List[Optional[_DaskVectorLike]]]]] = defaultdict(
                list
            )
        if eval_group:
            eval_groups: Dict[int, List[Union[_DatasetNames, List[Optional[_DaskVectorLike]]]]] = defaultdict(list)
        if eval_init_score:
            eval_init_scores: Dict[int, List[Union[_DatasetNames, List[Optional[_DaskMatrixLike]]]]] = defaultdict(list)

        for i, (X_eval, y_eval) in enumerate(eval_set):
            n_this_eval_parts = X_eval.npartitions

            # when individual eval set is equivalent to training data, skip recomputing parts.
            if X_eval is data and y_eval is label:
                for parts_idx in range(n_parts):
                    eval_sets[parts_idx].append(_DatasetNames.TRAINSET)
            else:
                eval_x_parts = _split_to_parts(data=X_eval, is_matrix=True)
                eval_y_parts = _split_to_parts(data=y_eval, is_matrix=False)
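                # assign eval chunks to training parts round-robin (j % n_parts), so
                # eval data lands on workers that also hold training data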
                for j in range(n_largest_eval_parts):
                    parts_idx = j % n_parts

                    # add None-padding for individual eval_set member if it is smaller than the largest member.
                    if j < n_this_eval_parts:
                        x_e = eval_x_parts[j]
                        y_e = eval_y_parts[j]
                    else:
                        x_e = None
                        y_e = None

                    if j < n_parts:
                        # first time a chunk of this eval set is added to this part.
                        eval_sets[parts_idx].append(([x_e], [y_e]))
                    else:
                        # append additional chunks of this eval set to this part.
                        eval_sets[parts_idx][-1][0].append(x_e)  # type: ignore[index, union-attr]
                        eval_sets[parts_idx][-1][1].append(y_e)  # type: ignore[index, union-attr]

            if eval_sample_weight:
                if eval_sample_weight[i] is sample_weight:
                    for parts_idx in range(n_parts):
                        eval_sample_weights[parts_idx].append(_DatasetNames.SAMPLE_WEIGHT)
                else:
                    eval_w_parts = _split_to_parts(data=eval_sample_weight[i], is_matrix=False)

                    # ensure that all evaluation parts map uniquely to one part.
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            w_e = eval_w_parts[j]
                        else:
                            w_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_sample_weights[parts_idx].append([w_e])
                        else:
                            eval_sample_weights[parts_idx][-1].append(w_e)  # type: ignore[union-attr]

            if eval_init_score:
                if eval_init_score[i] is init_score:
                    for parts_idx in range(n_parts):
                        eval_init_scores[parts_idx].append(_DatasetNames.INIT_SCORE)
                else:
                    eval_init_score_parts = _split_to_parts(data=eval_init_score[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            init_score_e = eval_init_score_parts[j]
                        else:
                            init_score_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_init_scores[parts_idx].append([init_score_e])
                        else:
                            eval_init_scores[parts_idx][-1].append(init_score_e)  # type: ignore[union-attr]

            if eval_group:
                if eval_group[i] is group:
                    for parts_idx in range(n_parts):
                        eval_groups[parts_idx].append(_DatasetNames.GROUP)
                else:
                    eval_g_parts = _split_to_parts(data=eval_group[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            g_e = eval_g_parts[j]
                        else:
                            g_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_groups[parts_idx].append([g_e])
                        else:
                            eval_groups[parts_idx][-1].append(g_e)  # type: ignore[union-attr]

        # assign sub-eval_set components to worker parts.
        for parts_idx, e_set in eval_sets.items():
            parts[parts_idx]["eval_set"] = e_set
            if eval_sample_weight:
                parts[parts_idx]["eval_sample_weight"] = eval_sample_weights[parts_idx]
            if eval_init_score:
                parts[parts_idx]["eval_init_score"] = eval_init_scores[parts_idx]
            if eval_group:
                parts[parts_idx]["eval_group"] = eval_groups[parts_idx]

    # Start computation in the background
    parts = list(map(delayed, parts))
    parts = client.compute(parts)
    wait(parts)

    for part in parts:
        if part.status == "error":  # type: ignore
            # trigger error locally
            return part  # type: ignore[return-value]

    # Find locations of all parts and map them to particular Dask workers
    key_to_part_dict = {part.key: part for part in parts}  # type: ignore
    who_has = client.who_has(parts)
    worker_map = defaultdict(list)
    for key, workers in who_has.items():
        worker_map[next(iter(workers))].append(key_to_part_dict[key])

    # Check that every worker was allocated at least part of eval_set. Otherwise, warn the user that
    # validation data artifacts may not be populated, depending on which worker returns the final estimator.
    if eval_set:
        for worker in worker_map:
            has_eval_set = False
            for part in worker_map[worker]:
                if "eval_set" in part.result():  # type: ignore[attr-defined]
                    has_eval_set = True
                    break

            if not has_eval_set:
                _log_warning(
                    f"Worker {worker} was not allocated eval_set data. Therefore evals_result_ and best_score_ data may be unreliable. "
                    "Try rebalancing data across workers."
                )

    # assign general validation set settings to fit kwargs.
    if eval_names:
        kwargs["eval_names"] = eval_names
    if eval_class_weight:
        kwargs["eval_class_weight"] = eval_class_weight
    if eval_metric:
        kwargs["eval_metric"] = eval_metric
    if eval_at:
        kwargs["eval_at"] = eval_at

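    # every worker trains a full copy of the distributed model, but only the
    # (arbitrarily chosen) "master" worker returns it to the caller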
    master_worker = next(iter(worker_map))
    worker_ncores = client.ncores()

    # resolve aliases for network parameters and pop the result off params.
    # these values are added back in calls to `_train_part()`
    params = _choose_param_value(
        main_param_name="local_listen_port",
        params=params,
        default_value=12400,
    )
    local_listen_port = params.pop("local_listen_port")

    params = _choose_param_value(
        main_param_name="machines",
        params=params,
        default_value=None,
    )
    machines = params.pop("machines")

    # figure out network params
    worker_to_socket_future: Dict[str, Future] = {}
    worker_addresses = worker_map.keys()
    if machines is not None:
        _log_info("Using passed-in 'machines' parameter")
        worker_address_to_port = _machines_to_worker_map(
            machines=machines,
            worker_addresses=worker_addresses,
        )
    else:
        if listen_port_in_params:
            _log_info("Using passed-in 'local_listen_port' for all workers")
            unique_hosts = {urlparse(a).hostname for a in worker_addresses}
            if len(unique_hosts) < len(worker_addresses):
                msg = (
                    "'local_listen_port' was provided in Dask training parameters, but at least one "
                    "machine in the cluster has multiple Dask worker processes running on it. Please omit "
                    "'local_listen_port' or pass 'machines'."
                )
                raise LightGBMError(msg)

            worker_address_to_port = dict.fromkeys(worker_addresses, local_listen_port)
        else:
            _log_info("Finding random open ports for workers")
            worker_to_socket_future, worker_address_to_port = _assign_open_ports_to_workers(
                client, list(worker_map.keys())
            )

        machines = ",".join(
            [f"{urlparse(worker_address).hostname}:{port}" for worker_address, port in worker_address_to_port.items()]
        )

    num_machines = len(worker_address_to_port)

    # Tell each worker to train on the parts that it has locally
    #
    # This code treats ``_train_part()`` calls as not "pure" because:
    #     1. there is randomness in the training process unless parameters ``seed``
    #        and ``deterministic`` are set
    #     2. even with those parameters set, the output of one ``_train_part()`` call
    #        relies on global state (it and all the other LightGBM training processes
    #        coordinate with each other)
    futures_classifiers = [
        client.submit(
            _train_part,
            model_factory=model_factory,
            params={**params, "num_threads": worker_ncores[worker]},
            list_of_parts=list_of_parts,
            machines=machines,
            local_listen_port=worker_address_to_port[worker],
            num_machines=num_machines,
            time_out=params.get("time_out", 120),
            remote_socket=worker_to_socket_future.get(worker, None),
            return_model=(worker == master_worker),
            workers=[worker],
            allow_other_workers=False,
            pure=False,
            **kwargs,
        )
        for worker, list_of_parts in worker_map.items()
    ]

    results = client.gather(futures_classifiers)
    results = [v for v in results if v]
    model = results[0]

    # if network parameters were changed during training, remove them from the
    # returned model so that they're generated dynamically on every run based
    # on the Dask cluster you're connected to and which workers have pieces of
    # the training data
    if not listen_port_in_params:
        for param in _ConfigAliases.get("local_listen_port"):
            model._other_params.pop(param, None)

    if not machines_in_params:
        for param in _ConfigAliases.get("machines"):
            model._other_params.pop(param, None)

    for param in _ConfigAliases.get("num_machines", "timeout"):
        model._other_params.pop(param, None)

    return model


def _predict_part(
    part: _DaskPart,
    *,
    model: LGBMModel,
    raw_score: bool,
    pred_proba: bool,
    pred_leaf: bool,
    pred_contrib: bool,
    **kwargs: Any,
) -> _DaskPart:
    result: _DaskPart
    if part.shape[0] == 0:
        result = np.array([])
    elif pred_proba:
        result = model.predict_proba(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs,
        )
    else:
        result = model.predict(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs,
        )

    # dask.DataFrame.map_partitions() expects each call to return a pandas DataFrame or Series
    if isinstance(part, pd_DataFrame):
        if len(result.shape) == 2:
            result = pd_DataFrame(result, index=part.index)
        else:
            result = pd_Series(result, index=part.index, name="predictions")

    return result


def _predict(
    *,
    model: LGBMModel,
    data: _DaskMatrixLike,
    client: Client,
    raw_score: bool = False,
    pred_proba: bool = False,
    pred_leaf: bool = False,
    pred_contrib: bool = False,
    dtype: _PredictionDtype = np.float32,
    **kwargs: Any,
) -> Union[dask_Array, List[dask_Array]]:
    """Inner predict routine.

    Parameters
    ----------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Fitted underlying model.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    pred_proba : bool, optional (default=False)
        Should method return results of ``predict_proba`` (``pred_proba=True``) or ``predict`` (``pred_proba=False``).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.
    dtype : np.dtype, optional (default=np.float32)
        Dtype of the output.
    **kwargs
        Other parameters passed to ``predict`` or ``predict_proba`` method.

    Returns
    -------
    predicted_result : Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]
        The predicted values.
    X_leaves : Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
    if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
        raise LightGBMError("dask, pandas and scikit-learn are required for lightgbm.dask")
    if isinstance(data, dask_DataFrame):
        return data.map_partitions(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs,
        ).values
    elif isinstance(data, dask_Array):
        # for multi-class classification with sparse matrices, pred_contrib predictions
        # are returned as a list of sparse matrices (one per class)
        num_classes = model._n_classes

        if num_classes > 2 and pred_contrib and isinstance(data._meta, ss.spmatrix):
            predict_function = partial(
                _predict_part,
                model=model,
                raw_score=False,
                pred_proba=pred_proba,
                pred_leaf=False,
                pred_contrib=True,
                **kwargs,
            )

            delayed_chunks = data.to_delayed()
            bag = dask_bag_from_delayed(delayed_chunks[:, 0])

            @delayed
            def _extract(items: List[Any], i: int) -> Any:
                return items[i]

            preds = bag.map_partitions(predict_function)

            # pred_contrib output will have one column per feature,
            # plus one more for the base value
            num_cols = model.n_features_ + 1

            nrows_per_chunk = data.chunks[0]
            out: List[List[dask_Array]] = [[] for _ in range(num_classes)]

            # need to tell Dask the expected type and shape of individual preds
            pred_meta = data._meta

            for j, partition in enumerate(preds.to_delayed()):
                for i in range(num_classes):
                    part = dask_array_from_delayed(
                        value=_extract(partition, i),
                        shape=(nrows_per_chunk[j], num_cols),
                        meta=pred_meta,
                    )
                    out[i].append(part)

            # by default, dask.array.concatenate() concatenates sparse arrays into a COO matrix
            # the code below is used instead to ensure that the sparse type is preserved during concatenation
            if isinstance(pred_meta, ss.csr_matrix):
                concat_fn = partial(ss.vstack, format="csr")
            elif isinstance(pred_meta, ss.csc_matrix):
                concat_fn = partial(ss.vstack, format="csc")
            else:
                concat_fn = ss.vstack

            # At this point, `out` is a list of lists of delayeds (each of which points to a matrix).
            # Concatenate them to return a list of Dask Arrays.
            out_arrays: List[dask_Array] = []
            for i in range(num_classes):
                out_arrays.append(
                    dask_array_from_delayed(
                        value=delayed(concat_fn)(out[i]),
                        shape=(data.shape[0], num_cols),
                        meta=pred_meta,
                    )
                )

            return out_arrays

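        # predict on a single row locally to learn the output's shape and dtype,
        # which map_blocks() needs as ``meta`` information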
        data_row = client.compute(data[[0]]).result()
        predict_fn = partial(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs,
        )
        pred_row = predict_fn(data_row)
        chunks: Tuple[int, ...] = (data.chunks[0],)
        map_blocks_kwargs = {}
        if len(pred_row.shape) > 1:
            chunks += (pred_row.shape[1],)
        else:
            map_blocks_kwargs["drop_axis"] = 1
        return data.map_blocks(
            predict_fn,
            chunks=chunks,
            meta=pred_row,
            dtype=dtype,
            **map_blocks_kwargs,
        )
    else:
        raise TypeError(f"Data must be either Dask Array or Dask DataFrame. Got {type(data).__name__}.")


class _DaskLGBMModel:
    @property
    def client_(self) -> Client:
        """:obj:`dask.distributed.Client`: Dask client.

        This property can be passed in the constructor or updated
        with ``model.set_params(client=client)``.
        """
        if not getattr(self, "fitted_", False):
            raise LGBMNotFittedError("Cannot access property client_ before calling fit().")

        return _get_dask_client(client=self.client)

    def _lgb_dask_getstate(self) -> Dict[Any, Any]:
        """Remove un-picklable attributes before serialization."""
        client = self.__dict__.pop("client", None)
        self._other_params.pop("client", None)  # type: ignore[attr-defined]
        out = deepcopy(self.__dict__)
        out.update({"client": None})
        self.client = client
        return out

    def _lgb_dask_fit(
        self,
        *,
        model_factory: Type[LGBMModel],
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Optional[Union[List[int], Tuple[int, ...]]] = None,
        **kwargs: Any,
    ) -> "_DaskLGBMModel":
        if not DASK_INSTALLED:
            raise LightGBMError("dask is required for lightgbm.dask")
        if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
            raise LightGBMError("dask, pandas and scikit-learn are required for lightgbm.dask")

        params = self.get_params(True)  # type: ignore[attr-defined]
        params.pop("client", None)

        model = _train(
            client=_get_dask_client(self.client),
            data=X,
            label=y,
            params=params,
            model_factory=model_factory,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs,
        )

        self.set_params(**model.get_params())  # type: ignore[attr-defined]
        self._lgb_dask_copy_extra_params(model, self)  # type: ignore[attr-defined]

        return self

    def _lgb_dask_to_local(self, model_factory: Type[LGBMModel]) -> LGBMModel:
        params = self.get_params()  # type: ignore[attr-defined]
        params.pop("client", None)
        model = model_factory(**params)
        self._lgb_dask_copy_extra_params(self, model)
        model._other_params.pop("client", None)
        return model

    @staticmethod
    def _lgb_dask_copy_extra_params(
        source: Union["_DaskLGBMModel", LGBMModel],
        dest: Union["_DaskLGBMModel", LGBMModel],
    ) -> None:
        params = source.get_params()  # type: ignore[union-attr]
        attributes = source.__dict__
        extra_param_names = set(attributes.keys()).difference(params.keys())
        for name in extra_param_names:
            setattr(dest, name, attributes[name])


class DaskLGBMClassifier(LGBMClassifier, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMClassifier."""

    def __init__(
        self,
        *,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, "np.random.Generator"]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        client: Optional[Client] = None,
        **kwargs: Any,
    ):
        """Docstring is inherited from the lightgbm.LGBMClassifier.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs,
        )

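    # splice a Dask-specific ``client`` parameter into the docstring inherited
    # from LGBMClassifier, just before its ``**kwargs`` entry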
    _base_doc = LGBMClassifier.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition("**kwargs")  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {" ":4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        **kwargs: Any,
    ) -> "DaskLGBMClassifier":
        """Docstring is inherited from the lightgbm.LGBMClassifier.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMClassifier,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs,
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
    )

    # DaskLGBMClassifier does not support group, eval_group.
    _base_doc = _base_doc[: _base_doc.find("group :")] + _base_doc[_base_doc.find("eval_set :") :]

    _base_doc = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]

    # DaskLGBMClassifier support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[: _base_doc.find("callbacks :")]}**kwargs
        Other parameters passed through to ``LGBMClassifier.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMClassifier
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            dtype=self.classes_.dtype,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]",
    )

    def predict_proba(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict_proba."""
        return _predict(
            model=self.to_local(),
            data=X,
            pred_proba=True,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]",
    )

    def to_local(self) -> LGBMClassifier:
        """Create regular version of lightgbm.LGBMClassifier from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMClassifier
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMClassifier)

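# A minimal usage sketch for DaskLGBMClassifier, assuming a local
# ``dask.distributed`` cluster and synthetic Dask arrays. Illustrative only,
# not part of the module; the names ``X``, ``y``, and ``clf`` are hypothetical.
#
#     from distributed import Client, LocalCluster
#     import dask.array as da
#     from lightgbm import DaskLGBMClassifier
#
#     client = Client(LocalCluster(n_workers=2))
#     X = da.random.random((1_000, 10), chunks=(100, 10))
#     y = (da.random.random(1_000, chunks=100) > 0.5).astype(int)
#     clf = DaskLGBMClassifier(n_estimators=10, client=client)
#     clf.fit(X, y)
#     preds = clf.predict(X).compute()        # class labels (numpy array)
#     proba = clf.predict_proba(X).compute()  # per-class probabilities
#     local_clf = clf.to_local()              # plain lightgbm.LGBMClassifier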

class DaskLGBMRegressor(LGBMRegressor, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRegressor."""

    def __init__(
        self,
        *,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, "np.random.Generator"]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        client: Optional[Client] = None,
        **kwargs: Any,
    ):
        """Docstring is inherited from the lightgbm.LGBMRegressor.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs,
        )

    _base_doc = LGBMRegressor.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition("**kwargs")  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {" ":4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        **kwargs: Any,
    ) -> "DaskLGBMRegressor":
        """Docstring is inherited from the lightgbm.LGBMRegressor.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMRegressor,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs,
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
    )

    # DaskLGBMRegressor does not support group, eval_class_weight, eval_group.
    _base_doc = _base_doc[: _base_doc.find("group :")] + _base_doc[_base_doc.find("eval_set :") :]

    _base_doc = _base_doc[: _base_doc.find("eval_class_weight :")] + _base_doc[_base_doc.find("eval_init_score :") :]

    _base_doc = _base_doc[: _base_doc.find("eval_group :")] + _base_doc[_base_doc.find("eval_metric :") :]

    # DaskLGBMRegressor support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[: _base_doc.find("callbacks :")]}**kwargs
        Other parameters passed through to ``LGBMRegressor.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRegressor
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRegressor.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]",
    )

    def to_local(self) -> LGBMRegressor:
        """Create regular version of lightgbm.LGBMRegressor from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRegressor
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRegressor)

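# A minimal sketch of round-tripping a trained Dask model, assuming ``X`` and
# ``y`` are Dask collections as in the classifier sketch above. Illustrative
# only: pickling drops the client (see ``__getstate__``), and ``to_local()``
# returns a plain estimator that needs no Dask at prediction time.
#
#     import pickle
#
#     reg = DaskLGBMRegressor(n_estimators=10).fit(X, y)
#     payload = pickle.dumps(reg)      # the Dask client is not serialized
#     reg2 = pickle.loads(payload)     # client is re-resolved at runtime
#     local_reg = reg.to_local()       # plain lightgbm.LGBMRegressor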

class DaskLGBMRanker(LGBMRanker, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRanker."""

    def __init__(
        self,
        *,
        boosting_type: str = "gbdt",
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.0,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.0,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.0,
        reg_alpha: float = 0.0,
        reg_lambda: float = 0.0,
        random_state: Optional[Union[int, np.random.RandomState, "np.random.Generator"]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = "split",
        client: Optional[Client] = None,
        **kwargs: Any,
    ):
        """Docstring is inherited from the lightgbm.LGBMRanker.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs,
        )

    _base_doc = LGBMRanker.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition("**kwargs")  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {" ":4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        **kwargs: Any,
    ) -> "DaskLGBMRanker":
        """Docstring is inherited from the lightgbm.LGBMRanker.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMRanker,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs,
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
    )

    # DaskLGBMRanker does not support eval_class_weight or early stopping
    _base_doc = _base_doc[: _base_doc.find("eval_class_weight :")] + _base_doc[_base_doc.find("eval_init_score :") :]

    _base_doc = (
        _base_doc[: _base_doc.find("feature_name :")]
        + "eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))\n"
        + f"{' ':8}The evaluation positions of the specified metric.\n"
        + f"{' ':4}{_base_doc[_base_doc.find('feature_name :') :]}"
    )

    # DaskLGBMRanker support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[: _base_doc.find("callbacks :")]}**kwargs
        Other parameters passed through to ``LGBMRanker.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRanker
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any,
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRanker.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs,
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]",
    )

    def to_local(self) -> LGBMRanker:
        """Create regular version of lightgbm.LGBMRanker from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRanker
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRanker)
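
# A minimal usage sketch for DaskLGBMRanker, illustrative only. It assumes
# synthetic data arranged so that query groups do not cross partition
# boundaries: ``group`` holds group sizes, and each 50-row chunk of ``X``
# is covered by whole groups (10 + 20 + 20 = 50). All names are hypothetical.
#
#     import numpy as np
#     import dask.array as da
#     from lightgbm import DaskLGBMRanker
#
#     X = da.random.random((100, 10), chunks=(50, 10))
#     y = da.random.randint(0, 4, size=100, chunks=50)   # relevance labels
#     group = da.from_array(np.array([10, 20, 20, 10, 20, 20]), chunks=3)
#     rnk = DaskLGBMRanker(n_estimators=10)
#     rnk.fit(X, y, group=group)
#     scores = rnk.predict(X).compute()   # relevance scores (numpy array)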