# coding: utf-8
"""Distributed training with LightGBM and dask.distributed.

This module enables you to perform distributed training with LightGBM on
dask.Array and dask.DataFrame collections.

It is based on dask-lightgbm, which was based on dask-xgboost.
"""
import operator
import socket
from collections import defaultdict
from copy import deepcopy
from enum import Enum, auto
from functools import partial
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union
from urllib.parse import urlparse

import numpy as np
import scipy.sparse as ss

from .basic import LightGBMError, _choose_param_value, _ConfigAliases, _log_info, _log_warning
from .compat import (
    DASK_INSTALLED,
    PANDAS_INSTALLED,
    SKLEARN_INSTALLED,
    Client,
    Future,
    LGBMNotFittedError,
    concat,
    dask_Array,
    dask_array_from_delayed,
    dask_bag_from_delayed,
    dask_DataFrame,
    dask_Series,
    default_client,
    delayed,
    pd_DataFrame,
    pd_Series,
    wait,
)
from .sklearn import (
    LGBMClassifier,
    LGBMModel,
    LGBMRanker,
    LGBMRegressor,
    _LGBM_ScikitCustomObjectiveFunction,
    _LGBM_ScikitEvalMetricType,
    _lgbmmodel_doc_custom_eval_note,
    _lgbmmodel_doc_fit,
    _lgbmmodel_doc_predict,
)

__all__ = [
    'DaskLGBMClassifier',
    'DaskLGBMRanker',
    'DaskLGBMRegressor',
]
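
# A minimal usage sketch (illustrative only, kept in a comment so nothing runs at
# import time). It assumes a running dask.distributed cluster and Dask collections
# ``X`` and ``y``; the scheduler address is hypothetical:
#
#     from distributed import Client
#     from lightgbm import DaskLGBMRegressor
#
#     client = Client("tcp://scheduler:8786")
#     model = DaskLGBMRegressor(client=client, n_estimators=100)
#     model.fit(X, y)
#     preds = model.predict(X)
#     local_model = model.to_local()  # plain LGBMRegressor for single-process use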

_DaskCollection = Union[dask_Array, dask_DataFrame, dask_Series]
_DaskMatrixLike = Union[dask_Array, dask_DataFrame]
_DaskVectorLike = Union[dask_Array, dask_Series]
_DaskPart = Union[np.ndarray, pd_DataFrame, pd_Series, ss.spmatrix]
_PredictionDtype = Union[Type[np.float32], Type[np.float64], Type[np.int32], Type[np.int64]]


class _RemoteSocket:
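    """Hold an open socket on a Dask worker to reserve a port for LightGBM training.

    ``acquire()`` binds to an OS-assigned free port and returns its number;
    ``release()`` closes the socket so that LightGBM itself can bind to the port.
    """
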
    def acquire(self) -> int:
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(('', 0))
        return self.socket.getsockname()[1]

    def release(self) -> None:
        self.socket.close()


def _acquire_port() -> Tuple[_RemoteSocket, int]:
    s = _RemoteSocket()
    port = s.acquire()
    return s, port


class _DatasetNames(Enum):
    """Placeholder names used by lightgbm.dask internals to say 'also evaluate the training data'.

    Avoid duplicating the training data when the validation set refers to elements of training data.
    """

    TRAINSET = auto()
    SAMPLE_WEIGHT = auto()
    INIT_SCORE = auto()
    GROUP = auto()
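
# For example, when a user passes ``eval_set=[(X, y)]`` where ``X`` and ``y`` are the
# same Dask collections used for training, ``_train()`` stores ``_DatasetNames.TRAINSET``
# for that entry instead of shipping a second copy of the data to the workers.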


def _get_dask_client(client: Optional[Client]) -> Client:
    """Choose a Dask client to use.

    Parameters
    ----------
    client : dask.distributed.Client or None
        Dask client.

    Returns
    -------
    client : dask.distributed.Client
        A Dask client.
    """
    if client is None:
        return default_client()
    else:
        return client


def _assign_open_ports_to_workers(
    client: Client,
    workers: List[str],
) -> Tuple[Dict[str, Future], Dict[str, int]]:
    """Assign an open port to each worker.

    Returns
    -------
    worker_to_socket_future: dict
        mapping from worker address to a future pointing to the remote socket.
    worker_to_port: dict
        mapping from worker address to an open port in the worker's host.
    """
    # Acquire port in worker
    worker_to_future = {}
    for worker in workers:
        worker_to_future[worker] = client.submit(
            _acquire_port,
            workers=[worker],
            allow_other_workers=False,
            pure=False,
        )

    # schedule futures to retrieve each element of the tuple
    worker_to_socket_future = {}
    worker_to_port_future = {}
    for worker, socket_future in worker_to_future.items():
        worker_to_socket_future[worker] = client.submit(operator.itemgetter(0), socket_future)
        worker_to_port_future[worker] = client.submit(operator.itemgetter(1), socket_future)

    # retrieve ports
    worker_to_port = client.gather(worker_to_port_future)

    return worker_to_socket_future, worker_to_port


def _concat(seq: List[_DaskPart]) -> _DaskPart:
    if isinstance(seq[0], np.ndarray):
        return np.concatenate(seq, axis=0)
    elif isinstance(seq[0], (pd_DataFrame, pd_Series)):
        return concat(seq, axis=0)
    elif isinstance(seq[0], ss.spmatrix):
        return ss.vstack(seq, format='csr')
    else:
        raise TypeError(f'Data must be one of: numpy arrays, pandas dataframes, sparse matrices (from scipy). Got {type(seq[0]).__name__}.')
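
# For example (illustrative): ``_concat([np.array([1, 2]), np.array([3])])`` returns
# ``array([1, 2, 3])``; pandas and scipy parts are stacked the same way along axis 0.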


def _remove_list_padding(*args: Any) -> List[List[Any]]:
    return [[z for z in arg if z is not None] for arg in args]


def _pad_eval_names(lgbm_model: LGBMModel, required_names: List[str]) -> LGBMModel:
    """Append missing (key, value) pairs to a LightGBM model's evals_result_ and best_score_ OrderedDict attrs based on a set of required eval_set names.

    Allows users to rely on expected eval_set names being present when fitting DaskLGBM estimators with ``eval_set``.
    """
    for eval_name in required_names:
        if eval_name not in lgbm_model.evals_result_:
            lgbm_model.evals_result_[eval_name] = {}
        if eval_name not in lgbm_model.best_score_:
            lgbm_model.best_score_[eval_name] = {}

    return lgbm_model


def _train_part(
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    list_of_parts: List[Dict[str, _DaskPart]],
    machines: str,
    local_listen_port: int,
    num_machines: int,
    return_model: bool,
    time_out: int,
    remote_socket: _RemoteSocket,
    **kwargs: Any
) -> Optional[LGBMModel]:
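    """Train one LightGBM model on a single Dask worker, using only the data parts local to it."""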
    network_params = {
        'machines': machines,
        'local_listen_port': local_listen_port,
        'time_out': time_out,
        'num_machines': num_machines
    }
    params.update(network_params)

    is_ranker = issubclass(model_factory, LGBMRanker)

    # Concatenate many parts into one
    data = _concat([x['data'] for x in list_of_parts])
    label = _concat([x['label'] for x in list_of_parts])

    if 'weight' in list_of_parts[0]:
        weight = _concat([x['weight'] for x in list_of_parts])
    else:
        weight = None

    if 'group' in list_of_parts[0]:
        group = _concat([x['group'] for x in list_of_parts])
    else:
        group = None

    if 'init_score' in list_of_parts[0]:
        init_score = _concat([x['init_score'] for x in list_of_parts])
    else:
        init_score = None

    # construct local eval_set data.
    n_evals = max(len(x.get('eval_set', [])) for x in list_of_parts)
    eval_names = kwargs.pop('eval_names', None)
    eval_class_weight = kwargs.get('eval_class_weight')
    local_eval_set = None
    local_eval_names = None
    local_eval_sample_weight = None
    local_eval_init_score = None
    local_eval_group = None

    if n_evals:
        has_eval_sample_weight = any(x.get('eval_sample_weight') is not None for x in list_of_parts)
        has_eval_init_score = any(x.get('eval_init_score') is not None for x in list_of_parts)

        local_eval_set = []
        evals_result_names = []
        if has_eval_sample_weight:
            local_eval_sample_weight = []
        if has_eval_init_score:
            local_eval_init_score = []
        if is_ranker:
            local_eval_group = []

        # store indices of eval_set components that were not contained within local parts.
        missing_eval_component_idx = []

        # consolidate parts of each individual eval component.
        for i in range(n_evals):
            x_e = []
            y_e = []
            w_e = []
            init_score_e = []
            g_e = []
            for part in list_of_parts:
                if not part.get('eval_set'):
                    continue

                # record the eval_set name so that it exists in the evaluation results even if this component was dropped due to padding.
                # in distributed training the 'training' eval_set is not detected, so it will have the name 'valid_<index>'.
                if eval_names:
                    evals_result_name = eval_names[i]
                else:
                    evals_result_name = f'valid_{i}'

                eval_set = part['eval_set'][i]
                if eval_set is _DatasetNames.TRAINSET:
                    x_e.append(part['data'])
                    y_e.append(part['label'])
                else:
                    x_e.extend(eval_set[0])
                    y_e.extend(eval_set[1])

                if evals_result_name not in evals_result_names:
                    evals_result_names.append(evals_result_name)

                eval_weight = part.get('eval_sample_weight')
                if eval_weight:
                    if eval_weight[i] is _DatasetNames.SAMPLE_WEIGHT:
                        w_e.append(part['weight'])
                    else:
                        w_e.extend(eval_weight[i])

                eval_init_score = part.get('eval_init_score')
                if eval_init_score:
                    if eval_init_score[i] is _DatasetNames.INIT_SCORE:
                        init_score_e.append(part['init_score'])
                    else:
                        init_score_e.extend(eval_init_score[i])

                eval_group = part.get('eval_group')
                if eval_group:
                    if eval_group[i] is _DatasetNames.GROUP:
                        g_e.append(part['group'])
                    else:
                        g_e.extend(eval_group[i])

            # filter padding from eval parts then _concat each eval_set component.
            x_e, y_e, w_e, init_score_e, g_e = _remove_list_padding(x_e, y_e, w_e, init_score_e, g_e)
            if x_e:
                local_eval_set.append((_concat(x_e), _concat(y_e)))
            else:
                missing_eval_component_idx.append(i)
                continue

            if w_e:
                local_eval_sample_weight.append(_concat(w_e))
            if init_score_e:
                local_eval_init_score.append(_concat(init_score_e))
            if g_e:
                local_eval_group.append(_concat(g_e))

        # reconstruct eval_set fit args/kwargs depending on which components of eval_set are on worker.
        eval_component_idx = [i for i in range(n_evals) if i not in missing_eval_component_idx]
        if eval_names:
            local_eval_names = [eval_names[i] for i in eval_component_idx]
        if eval_class_weight:
            kwargs['eval_class_weight'] = [eval_class_weight[i] for i in eval_component_idx]

    model = model_factory(**params)
    if remote_socket is not None:
        remote_socket.release()
    try:
        if is_ranker:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                group=group,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_group=local_eval_group,
                eval_names=local_eval_names,
                **kwargs
            )
        else:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_names=local_eval_names,
                **kwargs
            )
    finally:
        if getattr(model, "fitted_", False):
            model.booster_.free_network()

    if n_evals:
        # ensure that expected keys for evals_result_ and best_score_ exist regardless of padding.
        model = _pad_eval_names(model, required_names=evals_result_names)

    return model if return_model else None


def _split_to_parts(data: _DaskCollection, is_matrix: bool) -> List[_DaskPart]:
    parts = data.to_delayed()
    if isinstance(parts, np.ndarray):
        if is_matrix:
            assert parts.shape[1] == 1
        else:
            assert parts.ndim == 1 or parts.shape[1] == 1
        parts = parts.flatten().tolist()
    return parts


def _machines_to_worker_map(machines: str, worker_addresses: Iterable[str]) -> Dict[str, int]:
    """Create a worker_map from machines list.

    Given ``machines`` and a list of Dask worker addresses, return a mapping where the keys are
    ``worker_addresses`` and the values are ports from ``machines``.

    Parameters
    ----------
    machines : str
        A comma-delimited list of workers, of the form ``ip1:port,ip2:port``.
    worker_addresses : list of str
        An iterable of Dask worker addresses, of the form ``{protocol}{hostname}:{port}``, where ``port`` is the port Dask's scheduler uses to talk to that worker.

    Returns
    -------
    result : Dict[str, int]
        Dictionary where keys are worker addresses in the form expected by Dask and values are a port for LightGBM to use.
    """
    machine_addresses = machines.split(",")

    if len(set(machine_addresses)) != len(machine_addresses):
        raise ValueError(f"Found duplicates in 'machines' ({machines}). Each entry in 'machines' must be a unique IP-port combination.")

    machine_to_port = defaultdict(set)
    for address in machine_addresses:
        host, port = address.split(":")
        machine_to_port[host].add(int(port))

    out = {}
    for address in worker_addresses:
        worker_host = urlparse(address).hostname
        if not worker_host:
            raise ValueError(f"Could not parse host name from worker address '{address}'")
        out[address] = machine_to_port[worker_host].pop()

    return out
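
# Illustrative example of the mapping this function produces (hypothetical addresses):
#     _machines_to_worker_map(
#         machines="10.0.0.1:12400,10.0.0.2:12401",
#         worker_addresses=["tcp://10.0.0.1:40000", "tcp://10.0.0.2:40001"],
#     )
#     # -> {"tcp://10.0.0.1:40000": 12400, "tcp://10.0.0.2:40001": 12401}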


def _train(
    client: Client,
    data: _DaskMatrixLike,
    label: _DaskCollection,
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    sample_weight: Optional[_DaskVectorLike] = None,
    init_score: Optional[_DaskCollection] = None,
    group: Optional[_DaskVectorLike] = None,
    eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
    eval_names: Optional[List[str]] = None,
    eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
    eval_class_weight: Optional[List[Union[dict, str]]] = None,
    eval_init_score: Optional[List[_DaskCollection]] = None,
    eval_group: Optional[List[_DaskVectorLike]] = None,
    eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
    eval_at: Optional[Union[List[int], Tuple[int, ...]]] = None,
    **kwargs: Any
) -> LGBMModel:
    """Inner train routine.

    Parameters
    ----------
    client : dask.distributed.Client
        Dask client.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    label : Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]
        The target values (class labels in classification, real numbers in regression).
    params : dict
        Parameters passed to constructor of the local underlying model.
    model_factory : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Class of the local underlying model.
    sample_weight : Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)
        Weights of training data. Weights should be non-negative.
    init_score : Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)
        Init score of training data.
    group : Dask Array or Dask Series or None, optional (default=None)
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list of (X, y) tuples of Dask data collections, or None, optional (default=None)
        List of (X, y) tuple pairs to use as validation sets.
        Note that not all workers may receive chunks of every eval set within ``eval_set``. When the returned
        lightgbm estimator is not trained using any chunks of a particular eval set, its corresponding component
        of ``evals_result_`` and ``best_score_`` will be empty dictionaries.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : list of Dask Array or Dask Series, or None, optional (default=None)
        Weights for each validation set in eval_set. Weights should be non-negative.
    eval_class_weight : list of dict or str, or None, optional (default=None)
        Class weights, one dict or str for each validation set in eval_set.
    eval_init_score : list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)
        Initial model score for each validation set in eval_set.
    eval_group : list of Dask Array or Dask Series, or None, optional (default=None)
        Group/query for each validation set in eval_set.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the Dask model parameters (or inferred from the objective) will be evaluated and used as well.
        Default: 'l2' for DaskLGBMRegressor, 'binary(multi)_logloss' for DaskLGBMClassifier, 'ndcg' for DaskLGBMRanker.
    eval_at : list or tuple of int, optional (default=None)
        The evaluation positions of the specified ranking metric.
    **kwargs
        Other parameters passed to ``fit`` method of the local underlying model.

    Returns
    -------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Returns fitted underlying model.

    Note
    ----

    This method handles setting up the following network parameters based on information
    about the Dask cluster referenced by ``client``.

    * ``local_listen_port``: port that each LightGBM worker opens a listening socket on,
            to accept connections from other workers. This can differ from LightGBM worker
            to LightGBM worker, but does not have to.
    * ``machines``: a comma-delimited list of all workers in the cluster, in the
            form ``ip:port,ip:port``. If running multiple Dask workers on the same host, use different
            ports for each worker. For example, for ``LocalCluster(n_workers=3)``, you might
            pass ``"127.0.0.1:12400,127.0.0.1:12401,127.0.0.1:12402"``.
    * ``num_machines``: number of LightGBM workers.
    * ``timeout``: time in minutes to wait before closing unused sockets.

    The default behavior of this function is to generate ``machines`` from the list of
    Dask workers which hold some piece of the training data, and to search for an open
    port on each worker to be used as ``local_listen_port``.

    If ``machines`` is provided explicitly in ``params``, this function uses the hosts
    and ports in that list directly, and does not do any searching. This means that if
    any of the Dask workers are missing from the list or any of those ports are not free
    when training starts, training will fail.

    If ``local_listen_port`` is provided in ``params`` and ``machines`` is not, this function
    constructs ``machines`` from the list of Dask workers which hold some piece of the
    training data, assuming that each one will use the same ``local_listen_port``.
    """
    params = deepcopy(params)

    # capture whether local_listen_port or its aliases were provided
    listen_port_in_params = any(
        alias in params for alias in _ConfigAliases.get("local_listen_port")
    )

    # capture whether machines or its aliases were provided
    machines_in_params = any(
        alias in params for alias in _ConfigAliases.get("machines")
    )

    params = _choose_param_value(
        main_param_name="tree_learner",
        params=params,
        default_value="data"
    )
    allowed_tree_learners = {
        'data',
        'data_parallel',
        'feature',
        'feature_parallel',
        'voting',
        'voting_parallel'
    }
    if params["tree_learner"] not in allowed_tree_learners:
        _log_warning(f'Parameter tree_learner set to {params["tree_learner"]}, which is not allowed. Using "data" as default')
        params['tree_learner'] = 'data'

    # Some passed-in parameters can be removed:
    #   * 'num_machines': set automatically from Dask worker list
    #   * 'num_threads': overridden to match nthreads on each Dask process
    for param_alias in _ConfigAliases.get('num_machines', 'num_threads'):
        if param_alias in params:
            _log_warning(f"Parameter {param_alias} will be ignored.")
            params.pop(param_alias)

    # Split arrays/dataframes into parts. Arrange parts into dicts to enforce co-locality
    data_parts = _split_to_parts(data=data, is_matrix=True)
    label_parts = _split_to_parts(data=label, is_matrix=False)
    parts = [{'data': x, 'label': y} for (x, y) in zip(data_parts, label_parts)]
    n_parts = len(parts)

    if sample_weight is not None:
        weight_parts = _split_to_parts(data=sample_weight, is_matrix=False)
        for i in range(n_parts):
            parts[i]['weight'] = weight_parts[i]

    if group is not None:
        group_parts = _split_to_parts(data=group, is_matrix=False)
        for i in range(n_parts):
            parts[i]['group'] = group_parts[i]

    if init_score is not None:
        init_score_parts = _split_to_parts(data=init_score, is_matrix=False)
        for i in range(n_parts):
            parts[i]['init_score'] = init_score_parts[i]

    # eval_set will be re-constructed into smaller lists of (X, y) tuples, where
    # X and y are each delayed sub-lists of original eval dask Collections.
    if eval_set:
        # find maximum number of parts in an individual eval set so that we can
        # pad eval sets when they come in different sizes.
        n_largest_eval_parts = max(x[0].npartitions for x in eval_set)
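        # e.g. (illustrative): if one eval set has 3 chunks and another has 2, the smaller
        # one is padded with ``None`` so that both can be zipped against the same
        # training part indices below.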

        eval_sets: Dict[
            int,
            List[
                Union[
                    _DatasetNames,
                    Tuple[
                        List[Optional[_DaskMatrixLike]],
                        List[Optional[_DaskVectorLike]]
                    ]
                ]
            ]
        ] = defaultdict(list)
        if eval_sample_weight:
            eval_sample_weights: Dict[
                int,
                List[
                    Union[
                        _DatasetNames,
                        List[Optional[_DaskVectorLike]]
                    ]
                ]
            ] = defaultdict(list)
        if eval_group:
            eval_groups: Dict[
                int,
                List[
                    Union[
                        _DatasetNames,
                        List[Optional[_DaskVectorLike]]
                    ]
                ]
            ] = defaultdict(list)
        if eval_init_score:
            eval_init_scores: Dict[
                int,
                List[
                    Union[
                        _DatasetNames,
                        List[Optional[_DaskMatrixLike]]
                    ]
                ]
            ] = defaultdict(list)

        for i, (X_eval, y_eval) in enumerate(eval_set):
            n_this_eval_parts = X_eval.npartitions

            # when individual eval set is equivalent to training data, skip recomputing parts.
            if X_eval is data and y_eval is label:
                for parts_idx in range(n_parts):
                    eval_sets[parts_idx].append(_DatasetNames.TRAINSET)
            else:
                eval_x_parts = _split_to_parts(data=X_eval, is_matrix=True)
                eval_y_parts = _split_to_parts(data=y_eval, is_matrix=False)
                for j in range(n_largest_eval_parts):
                    parts_idx = j % n_parts

                    # add None-padding for individual eval_set member if it is smaller than the largest member.
                    if j < n_this_eval_parts:
                        x_e = eval_x_parts[j]
                        y_e = eval_y_parts[j]
                    else:
                        x_e = None
                        y_e = None

                    if j < n_parts:
                        # first time a chunk of this eval set is added to this part.
                        eval_sets[parts_idx].append(([x_e], [y_e]))
                    else:
                        # append additional chunks of this eval set to this part.
                        eval_sets[parts_idx][-1][0].append(x_e)  # type: ignore[index, union-attr]
                        eval_sets[parts_idx][-1][1].append(y_e)  # type: ignore[index, union-attr]

            if eval_sample_weight:
                if eval_sample_weight[i] is sample_weight:
                    for parts_idx in range(n_parts):
                        eval_sample_weights[parts_idx].append(_DatasetNames.SAMPLE_WEIGHT)
                else:
                    eval_w_parts = _split_to_parts(data=eval_sample_weight[i], is_matrix=False)

                    # ensure that all evaluation parts map uniquely to one part.
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            w_e = eval_w_parts[j]
                        else:
                            w_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_sample_weights[parts_idx].append([w_e])
                        else:
                            eval_sample_weights[parts_idx][-1].append(w_e)  # type: ignore[union-attr]

            if eval_init_score:
                if eval_init_score[i] is init_score:
                    for parts_idx in range(n_parts):
                        eval_init_scores[parts_idx].append(_DatasetNames.INIT_SCORE)
                else:
                    eval_init_score_parts = _split_to_parts(data=eval_init_score[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            init_score_e = eval_init_score_parts[j]
                        else:
                            init_score_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_init_scores[parts_idx].append([init_score_e])
                        else:
                            eval_init_scores[parts_idx][-1].append(init_score_e)  # type: ignore[union-attr]

            if eval_group:
                if eval_group[i] is group:
                    for parts_idx in range(n_parts):
                        eval_groups[parts_idx].append(_DatasetNames.GROUP)
                else:
                    eval_g_parts = _split_to_parts(data=eval_group[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            g_e = eval_g_parts[j]
                        else:
                            g_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_groups[parts_idx].append([g_e])
                        else:
                            eval_groups[parts_idx][-1].append(g_e)  # type: ignore[union-attr]

        # assign sub-eval_set components to worker parts.
        for parts_idx, e_set in eval_sets.items():
            parts[parts_idx]['eval_set'] = e_set
            if eval_sample_weight:
                parts[parts_idx]['eval_sample_weight'] = eval_sample_weights[parts_idx]
            if eval_init_score:
                parts[parts_idx]['eval_init_score'] = eval_init_scores[parts_idx]
            if eval_group:
                parts[parts_idx]['eval_group'] = eval_groups[parts_idx]

    # Start computation in the background
    parts = list(map(delayed, parts))
    parts = client.compute(parts)
    wait(parts)

    for part in parts:
        if part.status == 'error':  # type: ignore
            # trigger error locally
            return part  # type: ignore[return-value]

    # Find locations of all parts and map them to particular Dask workers
    key_to_part_dict = {part.key: part for part in parts}  # type: ignore
    who_has = client.who_has(parts)
    worker_map = defaultdict(list)
    for key, workers in who_has.items():
        worker_map[next(iter(workers))].append(key_to_part_dict[key])

    # Check that every worker was provided some piece of eval_set. Otherwise, warn the user that
    # validation data artifacts may not be populated, depending on which worker returns the final estimator.
    if eval_set:
        for worker in worker_map:
            has_eval_set = False
            for part in worker_map[worker]:
                if 'eval_set' in part.result():  # type: ignore[attr-defined]
                    has_eval_set = True
                    break

            if not has_eval_set:
                _log_warning(
                    f"Worker {worker} was not allocated eval_set data. Therefore evals_result_ and best_score_ data may be unreliable. "
                    "Try rebalancing data across workers."
                )

    # assign general validation set settings to fit kwargs.
    if eval_names:
        kwargs['eval_names'] = eval_names
    if eval_class_weight:
        kwargs['eval_class_weight'] = eval_class_weight
    if eval_metric:
        kwargs['eval_metric'] = eval_metric
    if eval_at:
        kwargs['eval_at'] = eval_at

    master_worker = next(iter(worker_map))
    worker_ncores = client.ncores()

    # resolve aliases for network parameters and pop the result off params.
    # these values are added back in calls to `_train_part()`
    params = _choose_param_value(
        main_param_name="local_listen_port",
        params=params,
        default_value=12400
    )
    local_listen_port = params.pop("local_listen_port")

    params = _choose_param_value(
        main_param_name="machines",
        params=params,
        default_value=None
    )
    machines = params.pop("machines")

    # figure out network params
    worker_to_socket_future: Dict[str, Future] = {}
    worker_addresses = worker_map.keys()
    if machines is not None:
        _log_info("Using passed-in 'machines' parameter")
        worker_address_to_port = _machines_to_worker_map(
            machines=machines,
            worker_addresses=worker_addresses
        )
    else:
        if listen_port_in_params:
            _log_info("Using passed-in 'local_listen_port' for all workers")
            unique_hosts = {urlparse(a).hostname for a in worker_addresses}
            if len(unique_hosts) < len(worker_addresses):
                msg = (
                    "'local_listen_port' was provided in Dask training parameters, but at least one "
                    "machine in the cluster has multiple Dask worker processes running on it. Please omit "
                    "'local_listen_port' or pass 'machines'."
                )
                raise LightGBMError(msg)

            worker_address_to_port = {
                address: local_listen_port
                for address in worker_addresses
            }
        else:
            _log_info("Finding random open ports for workers")
            worker_to_socket_future, worker_address_to_port = _assign_open_ports_to_workers(client, list(worker_map.keys()))

        machines = ','.join([
            f'{urlparse(worker_address).hostname}:{port}'
            for worker_address, port
            in worker_address_to_port.items()
        ])

    num_machines = len(worker_address_to_port)

    # Tell each worker to train on the parts that it has locally
    #
    # This code treats ``_train_part()`` calls as not "pure" because:
    #     1. there is randomness in the training process unless parameters ``seed``
    #        and ``deterministic`` are set
    #     2. even with those parameters set, the output of one ``_train_part()`` call
    #        relies on global state (it and all the other LightGBM training processes
    #        coordinate with each other)
    futures_classifiers = [
        client.submit(
            _train_part,
            model_factory=model_factory,
            params={**params, 'num_threads': worker_ncores[worker]},
            list_of_parts=list_of_parts,
            machines=machines,
            local_listen_port=worker_address_to_port[worker],
            num_machines=num_machines,
            time_out=params.get('time_out', 120),
            remote_socket=worker_to_socket_future.get(worker, None),
            return_model=(worker == master_worker),
            workers=[worker],
            allow_other_workers=False,
            pure=False,
            **kwargs
        )
        for worker, list_of_parts in worker_map.items()
    ]

    results = client.gather(futures_classifiers)
    results = [v for v in results if v]
    model = results[0]

    # if network parameters were changed during training, remove them from the
    # returned model so that they're generated dynamically on every run based
    # on the Dask cluster you're connected to and which workers have pieces of
    # the training data
    if not listen_port_in_params:
        for param in _ConfigAliases.get('local_listen_port'):
            model._other_params.pop(param, None)

    if not machines_in_params:
        for param in _ConfigAliases.get('machines'):
            model._other_params.pop(param, None)

    for param in _ConfigAliases.get('num_machines', 'timeout'):
        model._other_params.pop(param, None)

    return model


def _predict_part(
    part: _DaskPart,
    model: LGBMModel,
    raw_score: bool,
    pred_proba: bool,
    pred_leaf: bool,
    pred_contrib: bool,
    **kwargs: Any
) -> _DaskPart:
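    """Apply a fitted local model to a single chunk (partition) of Dask data."""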
    result: _DaskPart
    if part.shape[0] == 0:
        result = np.array([])
    elif pred_proba:
        result = model.predict_proba(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )
    else:
        result = model.predict(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )

    # dask.DataFrame.map_partitions() expects each call to return a pandas DataFrame or Series
    if isinstance(part, pd_DataFrame):
        if len(result.shape) == 2:
            result = pd_DataFrame(result, index=part.index)
        else:
            result = pd_Series(result, index=part.index, name='predictions')

    return result


def _predict(
    model: LGBMModel,
    data: _DaskMatrixLike,
    client: Client,
    raw_score: bool = False,
    pred_proba: bool = False,
    pred_leaf: bool = False,
    pred_contrib: bool = False,
    dtype: _PredictionDtype = np.float32,
    **kwargs: Any
) -> Union[dask_Array, List[dask_Array]]:
    """Inner predict routine.

    Parameters
    ----------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Fitted underlying model.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    pred_proba : bool, optional (default=False)
        Should method return results of ``predict_proba`` (``pred_proba=True``) or ``predict`` (``pred_proba=False``).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.
    dtype : np.dtype, optional (default=np.float32)
        Dtype of the output.
    **kwargs
        Other parameters passed to ``predict`` or ``predict_proba`` method.

    Returns
    -------
    predicted_result : Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]
        The predicted values.
    X_leaves : Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
    if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
        raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')
    if isinstance(data, dask_DataFrame):
        return data.map_partitions(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        ).values
    elif isinstance(data, dask_Array):
        # for multi-class classification with sparse matrices, pred_contrib predictions
        # are returned as a list of sparse matrices (one per class)
        num_classes = model._n_classes

        if (
            num_classes > 2
            and pred_contrib
            and isinstance(data._meta, ss.spmatrix)
        ):

            predict_function = partial(
                _predict_part,
                model=model,
                raw_score=False,
                pred_proba=pred_proba,
                pred_leaf=False,
                pred_contrib=True,
                **kwargs
            )

            delayed_chunks = data.to_delayed()
            bag = dask_bag_from_delayed(delayed_chunks[:, 0])

            @delayed
            def _extract(items: List[Any], i: int) -> Any:
                return items[i]

            preds = bag.map_partitions(predict_function)

            # pred_contrib output will have one column per feature,
            # plus one more for the base value
            num_cols = model.n_features_ + 1

            nrows_per_chunk = data.chunks[0]
            out: List[List[dask_Array]] = [[] for _ in range(num_classes)]

            # need to tell Dask the expected type and shape of individual preds
            pred_meta = data._meta

            for j, partition in enumerate(preds.to_delayed()):
                for i in range(num_classes):
                    part = dask_array_from_delayed(
                        value=_extract(partition, i),
                        shape=(nrows_per_chunk[j], num_cols),
                        meta=pred_meta
                    )
                    out[i].append(part)

            # by default, dask.array.concatenate() concatenates sparse arrays into a COO matrix
            # the code below is used instead to ensure that the sparse type is preserved during concatenation
            if isinstance(pred_meta, ss.csr_matrix):
                concat_fn = partial(ss.vstack, format='csr')
            elif isinstance(pred_meta, ss.csc_matrix):
                concat_fn = partial(ss.vstack, format='csc')
            else:
                concat_fn = ss.vstack

            # At this point, `out` is a list of lists of delayeds (each of which points to a matrix).
            # Concatenate them to return a list of Dask Arrays.
            out_arrays: List[dask_Array] = []
            for i in range(num_classes):
                out_arrays.append(
                    dask_array_from_delayed(
                        value=delayed(concat_fn)(out[i]),
                        shape=(data.shape[0], num_cols),
                        meta=pred_meta
                    )
                )

            return out_arrays

        data_row = client.compute(data[[0]]).result()
        predict_fn = partial(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs,
        )
        pred_row = predict_fn(data_row)
        chunks: Tuple[int, ...] = (data.chunks[0],)
        map_blocks_kwargs = {}
        if len(pred_row.shape) > 1:
            chunks += (pred_row.shape[1],)
        else:
            map_blocks_kwargs['drop_axis'] = 1
        return data.map_blocks(
            predict_fn,
            chunks=chunks,
            meta=pred_row,
            dtype=dtype,
            **map_blocks_kwargs,
        )
    else:
        raise TypeError(f'Data must be either Dask Array or Dask DataFrame. Got {type(data).__name__}.')


class _DaskLGBMModel:

    @property
    def client_(self) -> Client:
        """:obj:`dask.distributed.Client`: Dask client.

        This property can be passed in the constructor or updated
        with ``model.set_params(client=client)``.
        """
        if not getattr(self, "fitted_", False):
            raise LGBMNotFittedError('Cannot access property client_ before calling fit().')

        return _get_dask_client(client=self.client)

    def _lgb_dask_getstate(self) -> Dict[Any, Any]:
        """Remove un-picklable attributes before serialization."""
        client = self.__dict__.pop("client", None)
        self._other_params.pop("client", None)  # type: ignore[attr-defined]
        out = deepcopy(self.__dict__)
        out.update({"client": None})
        self.client = client
        return out

    def _lgb_dask_fit(
        self,
        model_factory: Type[LGBMModel],
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Optional[Union[List[int], Tuple[int, ...]]] = None,
        **kwargs: Any
    ) -> "_DaskLGBMModel":
        if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
            raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')

        params = self.get_params(True)  # type: ignore[attr-defined]
        params.pop("client", None)

        model = _train(
            client=_get_dask_client(self.client),
            data=X,
            label=y,
            params=params,
            model_factory=model_factory,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs
        )

        self.set_params(**model.get_params())  # type: ignore[attr-defined]
        self._lgb_dask_copy_extra_params(model, self)  # type: ignore[attr-defined]

        return self

    def _lgb_dask_to_local(self, model_factory: Type[LGBMModel]) -> LGBMModel:
        params = self.get_params()  # type: ignore[attr-defined]
        params.pop("client", None)
        model = model_factory(**params)
        self._lgb_dask_copy_extra_params(self, model)
        model._other_params.pop("client", None)
        return model

    @staticmethod
    def _lgb_dask_copy_extra_params(source: Union["_DaskLGBMModel", LGBMModel], dest: Union["_DaskLGBMModel", LGBMModel]) -> None:
        params = source.get_params()  # type: ignore[union-attr]
        attributes = source.__dict__
        extra_param_names = set(attributes.keys()).difference(params.keys())
        for name in extra_param_names:
            setattr(dest, name, attributes[name])


class DaskLGBMClassifier(LGBMClassifier, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMClassifier."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState, 'np.random.Generator']] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMClassifier.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMClassifier.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        **kwargs: Any
    ) -> "DaskLGBMClassifier":
        """Docstring is inherited from the lightgbm.LGBMClassifier.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMClassifier,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMClassifier does not support group, eval_group.
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('eval_set :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_group :')]
                 + _base_doc[_base_doc.find('eval_metric :'):])

    # DaskLGBMClassifier support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMClassifier.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMClassifier
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            dtype=self.classes_.dtype,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]"
    )
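
    # A short illustrative sketch (hypothetical variable names): with
    # ``pred_contrib=True``, each output row holds one SHAP value per feature
    # plus a final base-value column, matching the shapes documented above.
    # ``clf`` and ``X`` are assumed to come from an earlier ``fit()`` on Dask
    # collections.
    #
    #   contribs = clf.predict(X, pred_contrib=True)
    #   # binary task: Dask Array of shape [n_samples, n_features + 1]
    #   # multi-class, sparse input: list of n_classes such Dask Arrays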

    def predict_proba(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict_proba."""
        return _predict(
            model=self.to_local(),
            data=X,
            pred_proba=True,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMClassifier:
        """Create regular version of lightgbm.LGBMClassifier from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMClassifier
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMClassifier)
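
# A minimal end-to-end sketch for DaskLGBMClassifier (illustrative only, not
# executed at import time). It assumes a running dask.distributed cluster and
# that dask.array is available; all variable names are hypothetical.
#
#   import dask.array as da
#   from distributed import Client, LocalCluster
#   from lightgbm import DaskLGBMClassifier
#
#   client = Client(LocalCluster(n_workers=2))
#   X = da.random.random((1_000, 10), chunks=(100, 10))
#   y = da.random.randint(0, 2, size=(1_000,), chunks=(100,))
#
#   clf = DaskLGBMClassifier(client=client, n_estimators=10)
#   clf.fit(X, y)
#   labels = clf.predict(X)        # Dask Array of predicted class labels
#   probas = clf.predict_proba(X)  # Dask Array of shape [n_samples, n_classes]
#   local_clf = clf.to_local()     # plain lightgbm.LGBMClassifier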


class DaskLGBMRegressor(LGBMRegressor, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRegressor."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState, 'np.random.Generator']] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRegressor.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMRegressor.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()
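
    # A small pickling sketch (hypothetical variable names): per the note in
    # ``__init__``, the Dask client is not serialized when the model is
    # pickled, so a reloaded model falls back to
    # ``distributed.default_client()`` at runtime unless a client is
    # reassigned.
    #
    #   import pickle
    #   payload = pickle.dumps(dask_reg)    # client attribute is not saved
    #   reloaded = pickle.loads(payload)    # uses default_client() at runtime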

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        **kwargs: Any
    ) -> "DaskLGBMRegressor":
        """Docstring is inherited from the lightgbm.LGBMRegressor.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMRegressor,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMRegressor does not support group, eval_class_weight, eval_group.
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('eval_set :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_group :')]
                 + _base_doc[_base_doc.find('eval_metric :'):])

    # DaskLGBMRegressor support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRegressor.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRegressor
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRegressor.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMRegressor:
        """Create regular version of lightgbm.LGBMRegressor from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRegressor
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRegressor)
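
# A minimal regression sketch for DaskLGBMRegressor (illustrative only; the
# cluster setup and variable names are hypothetical). Passing ``client=None``
# (the default) would instead resolve ``distributed.default_client()`` at
# runtime.
#
#   import dask.array as da
#   from distributed import Client, LocalCluster
#   from lightgbm import DaskLGBMRegressor
#
#   client = Client(LocalCluster(n_workers=2))
#   X = da.random.random((1_000, 10), chunks=(100, 10))
#   y = da.random.random((1_000,), chunks=(100,))
#
#   reg = DaskLGBMRegressor(client=client, n_estimators=10)
#   reg.fit(X, y)
#   preds = reg.predict(X)         # Dask Array of shape [n_samples]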


class DaskLGBMRanker(LGBMRanker, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRanker."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState, 'np.random.Generator']] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRanker.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMRanker.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        **kwargs: Any
    ) -> "DaskLGBMRanker":
        """Docstring is inherited from the lightgbm.LGBMRanker.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMRanker,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMRanker does not support eval_class_weight or early stopping
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])

    _base_doc = (_base_doc[:_base_doc.find('feature_name :')]
                 + "eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))\n"
                 + f"{' ':8}The evaluation positions of the specified metric.\n"
                 + f"{' ':4}{_base_doc[_base_doc.find('feature_name :'):]}")

    # DaskLGBMRanker support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRanker.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRanker
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,  # type: ignore[override]
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRanker.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMRanker:
        """Create regular version of lightgbm.LGBMRanker from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRanker
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRanker)
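
# A minimal ranking sketch for DaskLGBMRanker (illustrative only; variable
# names are hypothetical). ``group`` holds the number of samples in each query
# group; this sketch assumes group boundaries line up with Dask partition
# boundaries and that the group sizes sum to n_samples.
#
#   import dask.array as da
#   import numpy as np
#   from distributed import Client, LocalCluster
#   from lightgbm import DaskLGBMRanker
#
#   client = Client(LocalCluster(n_workers=2))
#   X = da.random.random((1_000, 10), chunks=(100, 10))
#   y = da.random.randint(0, 4, size=(1_000,), chunks=(100,))
#   group = da.from_array(np.full(10, 100))  # ten query groups of 100 samples
#
#   rnk = DaskLGBMRanker(client=client, n_estimators=10)
#   rnk.fit(X, y, group=group)     # eval_at applies only when eval_set is given
#   scores = rnk.predict(X)        # Dask Array of relevance scores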