# coding: utf-8
"""Distributed training with LightGBM and dask.distributed.

This module enables you to perform distributed training with LightGBM on
dask.Array and dask.DataFrame collections.

It is based on dask-lightgbm, which was based on dask-xgboost.
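
A minimal usage sketch (the cluster size, chunk sizes, and data below are
illustrative only)::

    import dask.array as da
    from distributed import Client, LocalCluster

    from lightgbm import DaskLGBMRegressor

    cluster = LocalCluster(n_workers=2)
    client = Client(cluster)

    X = da.random.random((1000, 10), chunks=(100, 10))
    y = da.random.random((1000,), chunks=(100,))

    dask_model = DaskLGBMRegressor(n_estimators=10)
    dask_model.fit(X, y)
    preds = dask_model.predict(X)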
"""
import operator
import socket
from collections import defaultdict
from copy import deepcopy
from enum import Enum, auto
from functools import partial
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union
from urllib.parse import urlparse

import numpy as np
import scipy.sparse as ss

from .basic import LightGBMError, _choose_param_value, _ConfigAliases, _log_info, _log_warning
from .compat import (DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED, Client, Future, LGBMNotFittedError, concat,
                     dask_Array, dask_array_from_delayed, dask_bag_from_delayed, dask_DataFrame, dask_Series,
                     default_client, delayed, pd_DataFrame, pd_Series, wait)
from .sklearn import (LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor, _LGBM_ScikitCustomObjectiveFunction,
                      _LGBM_ScikitEvalMetricType, _lgbmmodel_doc_custom_eval_note, _lgbmmodel_doc_fit,
                      _lgbmmodel_doc_predict)

__all__ = [
    'DaskLGBMClassifier',
    'DaskLGBMRanker',
    'DaskLGBMRegressor',
]

_DaskCollection = Union[dask_Array, dask_DataFrame, dask_Series]
_DaskMatrixLike = Union[dask_Array, dask_DataFrame]
_DaskVectorLike = Union[dask_Array, dask_Series]
_DaskPart = Union[np.ndarray, pd_DataFrame, pd_Series, ss.spmatrix]
_PredictionDtype = Union[Type[np.float32], Type[np.float64], Type[np.int32], Type[np.int64]]


class _RemoteSocket:
    def acquire(self) -> int:
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(('', 0))
        return self.socket.getsockname()[1]

    def release(self) -> None:
        self.socket.close()


def _acquire_port() -> Tuple[_RemoteSocket, int]:
    s = _RemoteSocket()
    port = s.acquire()
    return s, port


class _DatasetNames(Enum):
    """Placeholder names used by lightgbm.dask internals to say 'also evaluate the training data'.

    Avoid duplicating the training data when the validation set refers to elements of training data.
    """

    TRAINSET = auto()
    SAMPLE_WEIGHT = auto()
    INIT_SCORE = auto()
    GROUP = auto()


def _get_dask_client(client: Optional[Client]) -> Client:
    """Choose a Dask client to use.

    Parameters
    ----------
    client : dask.distributed.Client or None
        Dask client.

    Returns
    -------
    client : dask.distributed.Client
        A Dask client.
    """
    if client is None:
        return default_client()
    else:
        return client


def _assign_open_ports_to_workers(
    client: Client,
    workers: List[str],
) -> Tuple[Dict[str, Future], Dict[str, int]]:
    """Assign an open port to each worker.

    Returns
    -------
    worker_to_socket_future: dict
        mapping from worker address to a future pointing to the remote socket.
    worker_to_port: dict
        mapping from worker address to an open port in the worker's host.
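
    For example, ``worker_to_port`` might look like this (addresses and ports are hypothetical)::

        {'tcp://10.0.0.1:40000': 12345, 'tcp://10.0.0.2:40000': 23456}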
    """
    # Acquire port in worker
    worker_to_future = {}
    for worker in workers:
        worker_to_future[worker] = client.submit(
            _acquire_port,
            workers=[worker],
            allow_other_workers=False,
            pure=False,
        )

    # schedule futures to retrieve each element of the tuple
    worker_to_socket_future = {}
    worker_to_port_future = {}
    for worker, socket_future in worker_to_future.items():
        worker_to_socket_future[worker] = client.submit(operator.itemgetter(0), socket_future)
        worker_to_port_future[worker] = client.submit(operator.itemgetter(1), socket_future)

    # retrieve ports
    worker_to_port = client.gather(worker_to_port_future)

    return worker_to_socket_future, worker_to_port


def _concat(seq: List[_DaskPart]) -> _DaskPart:
    if isinstance(seq[0], np.ndarray):
        return np.concatenate(seq, axis=0)
    elif isinstance(seq[0], (pd_DataFrame, pd_Series)):
        return concat(seq, axis=0)
    elif isinstance(seq[0], ss.spmatrix):
        return ss.vstack(seq, format='csr')
    else:
        raise TypeError(f'Data must be one of: numpy arrays, pandas dataframes, sparse matrices (from scipy). Got {type(seq[0]).__name__}.')


def _remove_list_padding(*args: Any) -> List[List[Any]]:
    return [[z for z in arg if z is not None] for arg in args]


def _pad_eval_names(lgbm_model: LGBMModel, required_names: List[str]) -> LGBMModel:
    """Append missing (key, value) pairs to a LightGBM model's evals_result_ and best_score_ OrderedDict attrs based on a set of required eval_set names.

    Allows users to rely on expected eval_set names being present when fitting DaskLGBM estimators with ``eval_set``.
    """
    for eval_name in required_names:
        if eval_name not in lgbm_model.evals_result_:
            lgbm_model.evals_result_[eval_name] = {}
        if eval_name not in lgbm_model.best_score_:
            lgbm_model.best_score_[eval_name] = {}

    return lgbm_model


def _train_part(
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    list_of_parts: List[Dict[str, _DaskPart]],
    machines: str,
    local_listen_port: int,
    num_machines: int,
    return_model: bool,
    time_out: int,
    remote_socket: _RemoteSocket,
    **kwargs: Any
) -> Optional[LGBMModel]:
    network_params = {
        'machines': machines,
        'local_listen_port': local_listen_port,
        'time_out': time_out,
        'num_machines': num_machines
    }
    params.update(network_params)

    is_ranker = issubclass(model_factory, LGBMRanker)

    # Concatenate many parts into one
    data = _concat([x['data'] for x in list_of_parts])
    label = _concat([x['label'] for x in list_of_parts])

    if 'weight' in list_of_parts[0]:
        weight = _concat([x['weight'] for x in list_of_parts])
    else:
        weight = None

    if 'group' in list_of_parts[0]:
        group = _concat([x['group'] for x in list_of_parts])
    else:
        group = None

    if 'init_score' in list_of_parts[0]:
        init_score = _concat([x['init_score'] for x in list_of_parts])
    else:
        init_score = None

    # construct local eval_set data.
    n_evals = max(len(x.get('eval_set', [])) for x in list_of_parts)
    eval_names = kwargs.pop('eval_names', None)
    eval_class_weight = kwargs.get('eval_class_weight')
    local_eval_set = None
    local_eval_names = None
    local_eval_sample_weight = None
    local_eval_init_score = None
    local_eval_group = None

    if n_evals:
        has_eval_sample_weight = any(x.get('eval_sample_weight') is not None for x in list_of_parts)
        has_eval_init_score = any(x.get('eval_init_score') is not None for x in list_of_parts)

        local_eval_set = []
        evals_result_names = []
        if has_eval_sample_weight:
            local_eval_sample_weight = []
        if has_eval_init_score:
            local_eval_init_score = []
        if is_ranker:
            local_eval_group = []

        # store indices of eval_set components that were not contained within local parts.
        missing_eval_component_idx = []

        # consolidate parts of each individual eval component.
        for i in range(n_evals):
            x_e = []
            y_e = []
            w_e = []
            init_score_e = []
            g_e = []
            for part in list_of_parts:
                if not part.get('eval_set'):
                    continue

                # require that eval_name exists in evaluated result data in case dropped due to padding.
                # in distributed training the 'training' eval_set is not detected and will have the name 'valid_<index>'.
                if eval_names:
                    evals_result_name = eval_names[i]
                else:
                    evals_result_name = f'valid_{i}'

                eval_set = part['eval_set'][i]
                if eval_set is _DatasetNames.TRAINSET:
                    x_e.append(part['data'])
                    y_e.append(part['label'])
                else:
                    x_e.extend(eval_set[0])
                    y_e.extend(eval_set[1])

                if evals_result_name not in evals_result_names:
                    evals_result_names.append(evals_result_name)

                eval_weight = part.get('eval_sample_weight')
                if eval_weight:
                    if eval_weight[i] is _DatasetNames.SAMPLE_WEIGHT:
                        w_e.append(part['weight'])
                    else:
                        w_e.extend(eval_weight[i])

                eval_init_score = part.get('eval_init_score')
                if eval_init_score:
                    if eval_init_score[i] is _DatasetNames.INIT_SCORE:
                        init_score_e.append(part['init_score'])
                    else:
                        init_score_e.extend(eval_init_score[i])

                eval_group = part.get('eval_group')
                if eval_group:
                    if eval_group[i] is _DatasetNames.GROUP:
                        g_e.append(part['group'])
                    else:
                        g_e.extend(eval_group[i])

            # filter padding from eval parts then _concat each eval_set component.
            x_e, y_e, w_e, init_score_e, g_e = _remove_list_padding(x_e, y_e, w_e, init_score_e, g_e)
            if x_e:
                local_eval_set.append((_concat(x_e), _concat(y_e)))
            else:
                missing_eval_component_idx.append(i)
                continue

            if w_e:
                local_eval_sample_weight.append(_concat(w_e))
            if init_score_e:
                local_eval_init_score.append(_concat(init_score_e))
            if g_e:
                local_eval_group.append(_concat(g_e))

        # reconstruct eval_set fit args/kwargs depending on which components of eval_set are on worker.
        eval_component_idx = [i for i in range(n_evals) if i not in missing_eval_component_idx]
        if eval_names:
            local_eval_names = [eval_names[i] for i in eval_component_idx]
        if eval_class_weight:
            kwargs['eval_class_weight'] = [eval_class_weight[i] for i in eval_component_idx]

    model = model_factory(**params)
    if remote_socket is not None:
        remote_socket.release()
    try:
        if is_ranker:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                group=group,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_group=local_eval_group,
                eval_names=local_eval_names,
                **kwargs
            )
        else:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_names=local_eval_names,
                **kwargs
            )

    finally:
        if getattr(model, "fitted_", False):
            model.booster_.free_network()

    if n_evals:
        # ensure that expected keys for evals_result_ and best_score_ exist regardless of padding.
        model = _pad_eval_names(model, required_names=evals_result_names)

    return model if return_model else None


def _split_to_parts(data: _DaskCollection, is_matrix: bool) -> List[_DaskPart]:
    parts = data.to_delayed()
    if isinstance(parts, np.ndarray):
        if is_matrix:
            assert parts.shape[1] == 1
        else:
            assert parts.ndim == 1 or parts.shape[1] == 1
        parts = parts.flatten().tolist()
    return parts


def _machines_to_worker_map(machines: str, worker_addresses: Iterable[str]) -> Dict[str, int]:
    """Create a worker_map from machines list.

    Given ``machines`` and a list of Dask worker addresses, return a mapping where the keys are
    ``worker_addresses`` and the values are ports from ``machines``.

    Parameters
    ----------
    machines : str
        A comma-delimited list of workers, of the form ``ip1:port,ip2:port``.
    worker_addresses : list of str
        An iterable of Dask worker addresses, of the form ``{protocol}{hostname}:{port}``, where ``port`` is the port Dask's scheduler uses to talk to that worker.

    Returns
    -------
    result : Dict[str, int]
        Dictionary where keys are worker addresses in the form expected by Dask and values are a port for LightGBM to use.
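
    Examples
    --------
    A sketch with hypothetical addresses::

        _machines_to_worker_map(
            machines="10.0.0.1:12400,10.0.0.2:12400",
            worker_addresses=["tcp://10.0.0.1:40000", "tcp://10.0.0.2:40000"]
        )
        # {'tcp://10.0.0.1:40000': 12400, 'tcp://10.0.0.2:40000': 12400}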
    """
    machine_addresses = machines.split(",")

    if len(set(machine_addresses)) != len(machine_addresses):
        raise ValueError(f"Found duplicates in 'machines' ({machines}). Each entry in 'machines' must be a unique IP-port combination.")

    machine_to_port = defaultdict(set)
    for address in machine_addresses:
        host, port = address.split(":")
        machine_to_port[host].add(int(port))

    out = {}
    for address in worker_addresses:
        worker_host = urlparse(address).hostname
        if not worker_host:
            raise ValueError(f"Could not parse host name from worker address '{address}'")
        out[address] = machine_to_port[worker_host].pop()

    return out


def _train(
    client: Client,
    data: _DaskMatrixLike,
    label: _DaskCollection,
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    sample_weight: Optional[_DaskVectorLike] = None,
    init_score: Optional[_DaskCollection] = None,
    group: Optional[_DaskVectorLike] = None,
    eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
    eval_names: Optional[List[str]] = None,
    eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
    eval_class_weight: Optional[List[Union[dict, str]]] = None,
    eval_init_score: Optional[List[_DaskCollection]] = None,
    eval_group: Optional[List[_DaskVectorLike]] = None,
    eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
    eval_at: Optional[Union[List[int], Tuple[int, ...]]] = None,
    **kwargs: Any
) -> LGBMModel:
    """Inner train routine.

    Parameters
    ----------
    client : dask.distributed.Client
        Dask client.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    label : Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]
        The target values (class labels in classification, real numbers in regression).
    params : dict
        Parameters passed to constructor of the local underlying model.
    model_factory : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Class of the local underlying model.
    sample_weight : Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)
        Weights of training data. Weights should be non-negative.
    init_score : Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)
        Init score of training data.
    group : Dask Array or Dask Series or None, optional (default=None)
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list of (X, y) tuples of Dask data collections, or None, optional (default=None)
        List of (X, y) tuple pairs to use as validation sets.
        Note, that not all workers may receive chunks of every eval set within ``eval_set``. When the returned
        lightgbm estimator is not trained using any chunks of a particular eval set, its corresponding component
        of ``evals_result_`` and ``best_score_`` will be empty dictionaries.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : list of Dask Array or Dask Series, or None, optional (default=None)
        Weights for each validation set in eval_set. Weights should be non-negative.
    eval_class_weight : list of dict or str, or None, optional (default=None)
        Class weights, one dict or str for each validation set in eval_set.
    eval_init_score : list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)
        Initial model score for each validation set in eval_set.
    eval_group : list of Dask Array or Dask Series, or None, optional (default=None)
        Group/query for each validation set in eval_set.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the Dask model parameters (or inferred from the objective) will be evaluated and used as well.
        Default: 'l2' for DaskLGBMRegressor, 'binary(multi)_logloss' for DaskLGBMClassifier, 'ndcg' for DaskLGBMRanker.
    eval_at : list or tuple of int, optional (default=None)
        The evaluation positions of the specified ranking metric.
    **kwargs
        Other parameters passed to ``fit`` method of the local underlying model.

    Returns
    -------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Returns fitted underlying model.

    Note
    ----

    This method handles setting up the following network parameters based on information
    about the Dask cluster referenced by ``client``.

    * ``local_listen_port``: port that each LightGBM worker opens a listening socket on,
            to accept connections from other workers. This can differ from LightGBM worker
            to LightGBM worker, but does not have to.
    * ``machines``: a comma-delimited list of all workers in the cluster, in the
            form ``ip:port,ip:port``. If running multiple Dask workers on the same host, use different
            ports for each worker. For example, for ``LocalCluster(n_workers=3)``, you might
            pass ``"127.0.0.1:12400,127.0.0.1:12401,127.0.0.1:12402"``.
    * ``num_machines``: number of LightGBM workers.
    * ``timeout``: time in minutes to wait before closing unused sockets.

    The default behavior of this function is to generate ``machines`` from the list of
    Dask workers which hold some piece of the training data, and to search for an open
    port on each worker to be used as ``local_listen_port``.

    If ``machines`` is provided explicitly in ``params``, this function uses the hosts
    and ports in that list directly, and does not do any searching. This means that if
    any of the Dask workers are missing from the list or any of those ports are not free
    when training starts, training will fail.

    If ``local_listen_port`` is provided in ``params`` and ``machines`` is not, this function
    constructs ``machines`` from the list of Dask workers which hold some piece of the
    training data, assuming that each one will use the same ``local_listen_port``.
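
    For example, to take full control of networking you might pass ``machines`` yourself
    (the addresses below are hypothetical)::

        dask_model = DaskLGBMRegressor(
            machines="10.0.0.1:12400,10.0.0.2:12400"
        )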
    """
    params = deepcopy(params)

    # capture whether local_listen_port or its aliases were provided
    listen_port_in_params = any(
        alias in params for alias in _ConfigAliases.get("local_listen_port")
    )

    # capture whether machines or its aliases were provided
    machines_in_params = any(
        alias in params for alias in _ConfigAliases.get("machines")
    )

    params = _choose_param_value(
        main_param_name="tree_learner",
        params=params,
        default_value="data"
    )
    allowed_tree_learners = {
        'data',
        'data_parallel',
        'feature',
        'feature_parallel',
        'voting',
        'voting_parallel'
    }
    if params["tree_learner"] not in allowed_tree_learners:
        _log_warning(f'Parameter tree_learner set to {params["tree_learner"]}, which is not allowed. Using "data" as default')
        params['tree_learner'] = 'data'

    # Some passed-in parameters can be removed:
    #   * 'num_machines': set automatically from Dask worker list
    #   * 'num_threads': overridden to match nthreads on each Dask process
    for param_alias in _ConfigAliases.get('num_machines', 'num_threads'):
        if param_alias in params:
            _log_warning(f"Parameter {param_alias} will be ignored.")
            params.pop(param_alias)

    # Split arrays/dataframes into parts. Arrange parts into dicts to enforce co-locality
    data_parts = _split_to_parts(data=data, is_matrix=True)
    label_parts = _split_to_parts(data=label, is_matrix=False)
    parts = [{'data': x, 'label': y} for (x, y) in zip(data_parts, label_parts)]
    n_parts = len(parts)

    if sample_weight is not None:
        weight_parts = _split_to_parts(data=sample_weight, is_matrix=False)
        for i in range(n_parts):
            parts[i]['weight'] = weight_parts[i]

    if group is not None:
        group_parts = _split_to_parts(data=group, is_matrix=False)
        for i in range(n_parts):
            parts[i]['group'] = group_parts[i]

    if init_score is not None:
        init_score_parts = _split_to_parts(data=init_score, is_matrix=False)
        for i in range(n_parts):
            parts[i]['init_score'] = init_score_parts[i]

    # eval_set will be re-constructed into smaller lists of (X, y) tuples, where
    # X and y are each delayed sub-lists of original eval dask Collections.
    if eval_set:
        # find maximum number of parts in an individual eval set so that we can
        # pad eval sets when they come in different sizes.
        n_largest_eval_parts = max(x[0].npartitions for x in eval_set)

        eval_sets: Dict[
            int,
            List[
                Union[
                    _DatasetNames,
                    Tuple[
                        List[Optional[_DaskMatrixLike]],
                        List[Optional[_DaskVectorLike]]
                    ]
                ]
            ]
        ] = defaultdict(list)
        if eval_sample_weight:
            eval_sample_weights: Dict[
                int,
                List[
                    Union[
                        _DatasetNames,
                        List[Optional[_DaskVectorLike]]
                    ]
                ]
            ] = defaultdict(list)
        if eval_group:
            eval_groups: Dict[
                int,
                List[
                    Union[
                        _DatasetNames,
                        List[Optional[_DaskVectorLike]]
                    ]
                ]
            ] = defaultdict(list)
        if eval_init_score:
            eval_init_scores: Dict[
                int,
                List[
                    Union[
                        _DatasetNames,
                        List[Optional[_DaskMatrixLike]]
                    ]
                ]
            ] = defaultdict(list)

        for i, (X_eval, y_eval) in enumerate(eval_set):
            n_this_eval_parts = X_eval.npartitions

            # when individual eval set is equivalent to training data, skip recomputing parts.
            if X_eval is data and y_eval is label:
                for parts_idx in range(n_parts):
                    eval_sets[parts_idx].append(_DatasetNames.TRAINSET)
            else:
                eval_x_parts = _split_to_parts(data=X_eval, is_matrix=True)
                eval_y_parts = _split_to_parts(data=y_eval, is_matrix=False)
                for j in range(n_largest_eval_parts):
                    parts_idx = j % n_parts

                    # add None-padding for individual eval_set member if it is smaller than the largest member.
                    if j < n_this_eval_parts:
                        x_e = eval_x_parts[j]
                        y_e = eval_y_parts[j]
                    else:
                        x_e = None
                        y_e = None

                    if j < n_parts:
                        # first time a chunk of this eval set is added to this part.
                        eval_sets[parts_idx].append(([x_e], [y_e]))
                    else:
                        # append additional chunks of this eval set to this part.
                        eval_sets[parts_idx][-1][0].append(x_e)  # type: ignore[index, union-attr]
                        eval_sets[parts_idx][-1][1].append(y_e)  # type: ignore[index, union-attr]

            if eval_sample_weight:
                if eval_sample_weight[i] is sample_weight:
                    for parts_idx in range(n_parts):
                        eval_sample_weights[parts_idx].append(_DatasetNames.SAMPLE_WEIGHT)
                else:
                    eval_w_parts = _split_to_parts(data=eval_sample_weight[i], is_matrix=False)

                    # ensure that all evaluation parts map uniquely to one part.
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            w_e = eval_w_parts[j]
                        else:
                            w_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_sample_weights[parts_idx].append([w_e])
                        else:
                            eval_sample_weights[parts_idx][-1].append(w_e)  # type: ignore[union-attr]

            if eval_init_score:
                if eval_init_score[i] is init_score:
                    for parts_idx in range(n_parts):
                        eval_init_scores[parts_idx].append(_DatasetNames.INIT_SCORE)
                else:
                    eval_init_score_parts = _split_to_parts(data=eval_init_score[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            init_score_e = eval_init_score_parts[j]
                        else:
                            init_score_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_init_scores[parts_idx].append([init_score_e])
                        else:
                            eval_init_scores[parts_idx][-1].append(init_score_e)  # type: ignore[union-attr]

            if eval_group:
                if eval_group[i] is group:
                    for parts_idx in range(n_parts):
                        eval_groups[parts_idx].append(_DatasetNames.GROUP)
                else:
                    eval_g_parts = _split_to_parts(data=eval_group[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            g_e = eval_g_parts[j]
                        else:
                            g_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_groups[parts_idx].append([g_e])
                        else:
                            eval_groups[parts_idx][-1].append(g_e)  # type: ignore[union-attr]

        # assign sub-eval_set components to worker parts.
        for parts_idx, e_set in eval_sets.items():
            parts[parts_idx]['eval_set'] = e_set
            if eval_sample_weight:
                parts[parts_idx]['eval_sample_weight'] = eval_sample_weights[parts_idx]
            if eval_init_score:
                parts[parts_idx]['eval_init_score'] = eval_init_scores[parts_idx]
            if eval_group:
                parts[parts_idx]['eval_group'] = eval_groups[parts_idx]

    # Start computation in the background
    parts = list(map(delayed, parts))
    parts = client.compute(parts)
    wait(parts)

    for part in parts:
        if part.status == 'error':  # type: ignore
            # trigger error locally
            return part  # type: ignore[return-value]

    # Find locations of all parts and map them to particular Dask workers
    key_to_part_dict = {part.key: part for part in parts}  # type: ignore
    who_has = client.who_has(parts)
    worker_map = defaultdict(list)
    for key, workers in who_has.items():
        worker_map[next(iter(workers))].append(key_to_part_dict[key])

    # Check that all workers were provided some piece of eval_set. Otherwise, warn the user that
    # validation data artifacts may not be populated, depending on which worker returns the final estimator.
    if eval_set:
        for worker in worker_map:
            has_eval_set = False
            for part in worker_map[worker]:
                if 'eval_set' in part.result():  # type: ignore[attr-defined]
                    has_eval_set = True
                    break

            if not has_eval_set:
                _log_warning(
                    f"Worker {worker} was not allocated eval_set data. Therefore evals_result_ and best_score_ data may be unreliable. "
                    "Try rebalancing data across workers."
                )

    # assign general validation set settings to fit kwargs.
    if eval_names:
        kwargs['eval_names'] = eval_names
    if eval_class_weight:
        kwargs['eval_class_weight'] = eval_class_weight
    if eval_metric:
        kwargs['eval_metric'] = eval_metric
    if eval_at:
        kwargs['eval_at'] = eval_at

    master_worker = next(iter(worker_map))
    worker_ncores = client.ncores()

    # resolve aliases for network parameters and pop the result off params.
    # these values are added back in calls to `_train_part()`
    params = _choose_param_value(
        main_param_name="local_listen_port",
        params=params,
        default_value=12400
    )
    local_listen_port = params.pop("local_listen_port")

    params = _choose_param_value(
        main_param_name="machines",
        params=params,
        default_value=None
    )
    machines = params.pop("machines")

    # figure out network params
    worker_to_socket_future: Dict[str, Future] = {}
    worker_addresses = worker_map.keys()
    if machines is not None:
        _log_info("Using passed-in 'machines' parameter")
        worker_address_to_port = _machines_to_worker_map(
            machines=machines,
            worker_addresses=worker_addresses
        )
    else:
        if listen_port_in_params:
            _log_info("Using passed-in 'local_listen_port' for all workers")
            unique_hosts = {urlparse(a).hostname for a in worker_addresses}
            if len(unique_hosts) < len(worker_addresses):
                msg = (
                    "'local_listen_port' was provided in Dask training parameters, but at least one "
                    "machine in the cluster has multiple Dask worker processes running on it. Please omit "
                    "'local_listen_port' or pass 'machines'."
                )
                raise LightGBMError(msg)

            worker_address_to_port = {
                address: local_listen_port
                for address in worker_addresses
            }
        else:
            _log_info("Finding random open ports for workers")
            worker_to_socket_future, worker_address_to_port = _assign_open_ports_to_workers(client, list(worker_map.keys()))

        machines = ','.join([
            f'{urlparse(worker_address).hostname}:{port}'
            for worker_address, port
            in worker_address_to_port.items()
        ])

    num_machines = len(worker_address_to_port)

    # Tell each worker to train on the parts that it has locally
    #
    # This code treats ``_train_part()`` calls as not "pure" because:
    #     1. there is randomness in the training process unless parameters ``seed``
    #        and ``deterministic`` are set
    #     2. even with those parameters set, the output of one ``_train_part()`` call
    #        relies on global state (it and all the other LightGBM training processes
    #        coordinate with each other)
    futures_classifiers = [
        client.submit(
            _train_part,
            model_factory=model_factory,
            params={**params, 'num_threads': worker_ncores[worker]},
            list_of_parts=list_of_parts,
            machines=machines,
            local_listen_port=worker_address_to_port[worker],
            num_machines=num_machines,
            time_out=params.get('time_out', 120),
            remote_socket=worker_to_socket_future.get(worker, None),
            return_model=(worker == master_worker),
            workers=[worker],
            allow_other_workers=False,
            pure=False,
            **kwargs
        )
        for worker, list_of_parts in worker_map.items()
    ]

    results = client.gather(futures_classifiers)
    results = [v for v in results if v]
    model = results[0]

    # if network parameters were changed during training, remove them from the
    # returned model so that they're generated dynamically on every run based
    # on the Dask cluster you're connected to and which workers have pieces of
    # the training data
    if not listen_port_in_params:
        for param in _ConfigAliases.get('local_listen_port'):
            model._other_params.pop(param, None)

    if not machines_in_params:
        for param in _ConfigAliases.get('machines'):
            model._other_params.pop(param, None)

    for param in _ConfigAliases.get('num_machines', 'timeout'):
        model._other_params.pop(param, None)

    return model


def _predict_part(
    part: _DaskPart,
    model: LGBMModel,
    raw_score: bool,
    pred_proba: bool,
    pred_leaf: bool,
    pred_contrib: bool,
    **kwargs: Any
) -> _DaskPart:
    result: _DaskPart
    if part.shape[0] == 0:
        result = np.array([])
    elif pred_proba:
        result = model.predict_proba(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )
    else:
        result = model.predict(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )

    # dask.DataFrame.map_partitions() expects each call to return a pandas DataFrame or Series
    if isinstance(part, pd_DataFrame):
        if len(result.shape) == 2:
            result = pd_DataFrame(result, index=part.index)
        else:
            result = pd_Series(result, index=part.index, name='predictions')

    return result


def _predict(
    model: LGBMModel,
    data: _DaskMatrixLike,
    client: Client,
    raw_score: bool = False,
    pred_proba: bool = False,
    pred_leaf: bool = False,
    pred_contrib: bool = False,
    dtype: _PredictionDtype = np.float32,
    **kwargs: Any
) -> Union[dask_Array, List[dask_Array]]:
    """Inner predict routine.

    Parameters
    ----------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Fitted underlying model.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    pred_proba : bool, optional (default=False)
        Should method return results of ``predict_proba`` (``pred_proba=True``) or ``predict`` (``pred_proba=False``).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.
    dtype : np.dtype, optional (default=np.float32)
        Dtype of the output.
    **kwargs
        Other parameters passed to ``predict`` or ``predict_proba`` method.

    Returns
    -------
    predicted_result : Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]
        The predicted values.
    X_leaves : Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
    if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
        raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')
    if isinstance(data, dask_DataFrame):
        return data.map_partitions(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        ).values
    elif isinstance(data, dask_Array):
        # for multi-class classification with sparse matrices, pred_contrib predictions
        # are returned as a list of sparse matrices (one per class)
        num_classes = model._n_classes

        if (
            num_classes > 2
            and pred_contrib
            and isinstance(data._meta, ss.spmatrix)
        ):

            predict_function = partial(
                _predict_part,
                model=model,
                raw_score=False,
                pred_proba=pred_proba,
                pred_leaf=False,
                pred_contrib=True,
                **kwargs
            )

            delayed_chunks = data.to_delayed()
            bag = dask_bag_from_delayed(delayed_chunks[:, 0])

            @delayed
            def _extract(items: List[Any], i: int) -> Any:
                return items[i]

            preds = bag.map_partitions(predict_function)

            # pred_contrib output will have one column per feature,
            # plus one more for the base value
            num_cols = model.n_features_ + 1

            nrows_per_chunk = data.chunks[0]
            out: List[List[dask_Array]] = [[] for _ in range(num_classes)]

            # need to tell Dask the expected type and shape of individual preds
            pred_meta = data._meta

            for j, partition in enumerate(preds.to_delayed()):
                for i in range(num_classes):
                    part = dask_array_from_delayed(
                        value=_extract(partition, i),
                        shape=(nrows_per_chunk[j], num_cols),
                        meta=pred_meta
                    )
                    out[i].append(part)

            # by default, dask.array.concatenate() concatenates sparse arrays into a COO matrix
            # the code below is used instead to ensure that the sparse type is preserved during concatenation
            if isinstance(pred_meta, ss.csr_matrix):
                concat_fn = partial(ss.vstack, format='csr')
            elif isinstance(pred_meta, ss.csc_matrix):
                concat_fn = partial(ss.vstack, format='csc')
            else:
                concat_fn = ss.vstack

            # At this point, `out` is a list of lists of delayeds (each of which points to a matrix).
            # Concatenate them to return a list of Dask Arrays.
            out_arrays: List[dask_Array] = []
            for i in range(num_classes):
                out_arrays.append(
                    dask_array_from_delayed(
                        value=delayed(concat_fn)(out[i]),
                        shape=(data.shape[0], num_cols),
                        meta=pred_meta
                    )
                )

            return out_arrays

        data_row = client.compute(data[[0]]).result()
        predict_fn = partial(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs,
        )
        pred_row = predict_fn(data_row)
        chunks: Tuple[int, ...] = (data.chunks[0],)
        map_blocks_kwargs = {}
        if len(pred_row.shape) > 1:
            chunks += (pred_row.shape[1],)
        else:
            map_blocks_kwargs['drop_axis'] = 1
        return data.map_blocks(
            predict_fn,
            chunks=chunks,
            meta=pred_row,
            dtype=dtype,
            **map_blocks_kwargs,
        )
    else:
        raise TypeError(f'Data must be either Dask Array or Dask DataFrame. Got {type(data).__name__}.')


class _DaskLGBMModel:

    @property
    def client_(self) -> Client:
        """:obj:`dask.distributed.Client`: Dask client.

        This property can be passed in the constructor or updated
        with ``model.set_params(client=client)``.
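
        For example (a sketch; assumes a reachable Dask scheduler)::

            from distributed import Client

            dask_model = DaskLGBMRegressor(client=Client())
            # ...or attach a different client to an existing model:
            dask_model.set_params(client=Client())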
        """
        if not getattr(self, "fitted_", False):
            raise LGBMNotFittedError('Cannot access property client_ before calling fit().')

        return _get_dask_client(client=self.client)

    def _lgb_dask_getstate(self) -> Dict[Any, Any]:
        """Remove un-picklable attributes before serialization."""
        client = self.__dict__.pop("client", None)
        self._other_params.pop("client", None)  # type: ignore[attr-defined]
        out = deepcopy(self.__dict__)
        out.update({"client": None})
        self.client = client
        return out

    def _lgb_dask_fit(
        self,
        model_factory: Type[LGBMModel],
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Optional[Union[List[int], Tuple[int, ...]]] = None,
        **kwargs: Any
    ) -> "_DaskLGBMModel":
        if not DASK_INSTALLED:
            raise LightGBMError('dask is required for lightgbm.dask')
        if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
            raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')

        params = self.get_params(True)  # type: ignore[attr-defined]
        params.pop("client", None)

        model = _train(
            client=_get_dask_client(self.client),
            data=X,
            label=y,
            params=params,
            model_factory=model_factory,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs
        )

        self.set_params(**model.get_params())  # type: ignore[attr-defined]
        self._lgb_dask_copy_extra_params(model, self)  # type: ignore[attr-defined]

        return self

    def _lgb_dask_to_local(self, model_factory: Type[LGBMModel]) -> LGBMModel:
        params = self.get_params()  # type: ignore[attr-defined]
        params.pop("client", None)
        model = model_factory(**params)
        self._lgb_dask_copy_extra_params(self, model)
        model._other_params.pop("client", None)
        return model

    @staticmethod
    def _lgb_dask_copy_extra_params(source: Union["_DaskLGBMModel", LGBMModel], dest: Union["_DaskLGBMModel", LGBMModel]) -> None:
        params = source.get_params()  # type: ignore[union-attr]
        attributes = source.__dict__
        extra_param_names = set(attributes.keys()).difference(params.keys())
        for name in extra_param_names:
            setattr(dest, name, attributes[name])


class DaskLGBMClassifier(LGBMClassifier, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMClassifier."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState, 'np.random.Generator']] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMClassifier.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMClassifier.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        **kwargs: Any
    ) -> "DaskLGBMClassifier":
        """Docstring is inherited from the lightgbm.LGBMClassifier.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMClassifier,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMClassifier does not support group, eval_group.
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('eval_set :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_group :')]
                 + _base_doc[_base_doc.find('eval_metric :'):])

    # DaskLGBMClassifier support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMClassifier.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMClassifier
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            dtype=self.classes_.dtype,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]"
1281
    )

    def predict_proba(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict_proba."""
        return _predict(
            model=self.to_local(),
            data=X,
            pred_proba=True,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMClassifier:
        """Create regular version of lightgbm.LGBMClassifier from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMClassifier
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMClassifier)
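
# Usage sketch (illustrative only, not executed as part of this module):
# a minimal distributed classification workflow, assuming a running
# ``dask.distributed`` cluster is available. Data below are random
# placeholders.
#
#   import dask.array as da
#   from distributed import Client
#   from lightgbm import DaskLGBMClassifier
#
#   client = Client()  # connect to (or start) a Dask cluster
#   X = da.random.random((10_000, 20), chunks=(1_000, 20))
#   y = (da.random.random(10_000, chunks=1_000) > 0.5).astype("int32")
#
#   clf = DaskLGBMClassifier(n_estimators=50)
#   clf.fit(X, y)
#   preds = clf.predict(X)        # Dask Array of predicted class labels
#   proba = clf.predict_proba(X)  # Dask Array of per-class probabilities
#   local_clf = clf.to_local()    # plain lightgbm.LGBMClassifier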


class DaskLGBMRegressor(LGBMRegressor, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRegressor."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState, 'np.random.Generator']] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRegressor.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

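    # Patch the inherited constructor docstring: split the parent class's
    # ``__init__`` docs at '**kwargs' and splice in the Dask-only ``client``
    # parameter, so the rendered docs describe every accepted argument.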
    _base_doc = LGBMRegressor.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        **kwargs: Any
    ) -> "DaskLGBMRegressor":
        """Docstring is inherited from the lightgbm.LGBMRegressor.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMRegressor,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMRegressor does not support group, eval_class_weight, eval_group.
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('eval_set :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_group :')]
                 + _base_doc[_base_doc.find('eval_metric :'):])

    # DaskLGBMRegressor support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRegressor.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRegressor
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRegressor.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMRegressor:
        """Create regular version of lightgbm.LGBMRegressor from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRegressor
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRegressor)
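
# Usage sketch (illustrative only, not executed as part of this module):
# a minimal distributed regression workflow on random placeholder data,
# assuming a running ``dask.distributed`` cluster.
#
#   import dask.array as da
#   from distributed import Client
#   from lightgbm import DaskLGBMRegressor
#
#   client = Client()
#   X = da.random.random((10_000, 20), chunks=(1_000, 20))
#   y = da.random.random(10_000, chunks=1_000)
#
#   reg = DaskLGBMRegressor(n_estimators=100, learning_rate=0.1)
#   reg.fit(X, y)
#   preds = reg.predict(X)       # Dask Array of shape [n_samples]
#   local_reg = reg.to_local()   # plain lightgbm.LGBMRegressor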


class DaskLGBMRanker(LGBMRanker, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRanker."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState, 'np.random.Generator']] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRanker.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

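    # Patch the inherited constructor docstring: split the parent class's
    # ``__init__`` docs at '**kwargs' and splice in the Dask-only ``client``
    # parameter, so the rendered docs describe every accepted argument.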
    _base_doc = LGBMRanker.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        **kwargs: Any
    ) -> "DaskLGBMRanker":
        """Docstring is inherited from the lightgbm.LGBMRanker.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMRanker,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMRanker does not support eval_class_weight or early stopping
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])

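    # Splice documentation for ``eval_at`` into the docstring just ahead of
    # the ``feature_name`` entry, since the shared fit() docstring template
    # does not include it.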
    _base_doc = (_base_doc[:_base_doc.find('feature_name :')]
                 + "eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))\n"
                 + f"{' ':8}The evaluation positions of the specified metric.\n"
                 + f"{' ':4}{_base_doc[_base_doc.find('feature_name :'):]}")

    # DaskLGBMRanker support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRanker.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRanker
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRanker.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMRanker:
        """Create regular version of lightgbm.LGBMRanker from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRanker
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRanker)
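
# Usage sketch (illustrative only, not executed as part of this module):
# a minimal distributed learning-to-rank workflow on random placeholder
# data. ``group`` holds query-group sizes; when training with Dask, each
# partition of X must contain only whole query groups.
#
#   import dask.array as da
#   import numpy as np
#   from distributed import Client
#   from lightgbm import DaskLGBMRanker
#
#   client = Client()
#   X = da.random.random((1_000, 10), chunks=(500, 10))
#   y = da.random.randint(0, 5, size=1_000, chunks=500)  # relevance labels
#   # 10 query groups of 100 rows each -> 5 whole groups per partition
#   group = da.from_array(np.full(10, 100), chunks=5)
#
#   ranker = DaskLGBMRanker(n_estimators=50)
#   ranker.fit(X, y, group=group)
#   scores = ranker.predict(X)  # Dask Array of relevance scores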