# coding: utf-8
"""Distributed training with LightGBM and dask.distributed.

This module enables you to perform distributed training with LightGBM on
dask.Array and dask.DataFrame collections.

It is based on dask-lightgbm, which was based on dask-xgboost.
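
A minimal usage sketch (assumes a running ``dask.distributed`` cluster and that
``dX`` and ``dy`` are Dask collections)::

    from distributed import Client

    from lightgbm import DaskLGBMRegressor

    client = Client("tcp://scheduler-address:8786")  # hypothetical address
    model = DaskLGBMRegressor(client=client).fit(dX, dy)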
"""
import socket
from collections import defaultdict
from copy import deepcopy
from enum import Enum, auto
from functools import partial
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union
from urllib.parse import urlparse

import numpy as np
import scipy.sparse as ss

from .basic import LightGBMError, _choose_param_value, _ConfigAliases, _log_info, _log_warning
from .compat import (DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED, Client, LGBMNotFittedError, concat,
                     dask_Array, dask_array_from_delayed, dask_bag_from_delayed, dask_DataFrame, dask_Series,
                     default_client, delayed, pd_DataFrame, pd_Series, wait)
from .sklearn import (LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor, _LGBM_ScikitCustomObjectiveFunction,
                      _LGBM_ScikitEvalMetricType, _lgbmmodel_doc_custom_eval_note, _lgbmmodel_doc_fit,
                      _lgbmmodel_doc_predict)

__all__ = [
    'DaskLGBMClassifier',
    'DaskLGBMRanker',
    'DaskLGBMRegressor',
]

_DaskCollection = Union[dask_Array, dask_DataFrame, dask_Series]
_DaskMatrixLike = Union[dask_Array, dask_DataFrame]
_DaskVectorLike = Union[dask_Array, dask_Series]
_DaskPart = Union[np.ndarray, pd_DataFrame, pd_Series, ss.spmatrix]
_PredictionDtype = Union[Type[np.float32], Type[np.float64], Type[np.int32], Type[np.int64]]


class _HostWorkers:

    def __init__(self, default: str, all_workers: List[str]):
        self.default = default
        self.all_workers = all_workers

    def __eq__(self, other: object) -> bool:
        return (
            isinstance(other, type(self))
            and self.default == other.default
            and self.all_workers == other.all_workers
        )


class _DatasetNames(Enum):
    """Placeholder names used by lightgbm.dask internals to say 'also evaluate the training data'.

    Avoid duplicating the training data when the validation set refers to elements of training data.
    """

    TRAINSET = auto()
    SAMPLE_WEIGHT = auto()
    INIT_SCORE = auto()
    GROUP = auto()

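# For example, when a user passes ``eval_set=[(X, y)]`` where ``X`` and ``y``
# are the same collections used for training, the part payloads store
# ``_DatasetNames.TRAINSET`` instead of shipping a second copy of the data.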

def _get_dask_client(client: Optional[Client]) -> Client:
    """Choose a Dask client to use.

    Parameters
    ----------
    client : dask.distributed.Client or None
        Dask client.

    Returns
    -------
    client : dask.distributed.Client
        A Dask client.
    """
    if client is None:
        return default_client()
    else:
        return client


def _find_n_open_ports(n: int) -> List[int]:
    """Find n random open ports on localhost.

    Returns
    -------
    ports : list of int
        n random open ports on localhost.
    """
    sockets = []
    for _ in range(n):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))
        sockets.append(s)
    ports = []
    for s in sockets:
        ports.append(s.getsockname()[1])
        s.close()
    return ports

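# Usage sketch for the helper above (port numbers are hypothetical; actual
# values are whatever the OS hands out):
#
#   >>> _find_n_open_ports(2)
#   [52701, 52702]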

def _group_workers_by_host(worker_addresses: Iterable[str]) -> Dict[str, _HostWorkers]:
    """Group all worker addresses by hostname.

    Returns
    -------
    host_to_workers : dict
        mapping from hostname to all its workers.
    """
    host_to_workers: Dict[str, _HostWorkers] = {}
    for address in worker_addresses:
        hostname = urlparse(address).hostname
        if not hostname:
            raise ValueError(f"Could not parse host name from worker address '{address}'")
        if hostname not in host_to_workers:
            host_to_workers[hostname] = _HostWorkers(default=address, all_workers=[address])
        else:
            host_to_workers[hostname].all_workers.append(address)
    return host_to_workers


def _assign_open_ports_to_workers(
    client: Client,
    host_to_workers: Dict[str, _HostWorkers]
) -> Dict[str, int]:
    """Assign an open port to each worker.

    Returns
    -------
    worker_to_port: dict
        mapping from worker address to an open port.
    """
    host_ports_futures = {}
    for hostname, workers in host_to_workers.items():
        n_workers_in_host = len(workers.all_workers)
        host_ports_futures[hostname] = client.submit(
            _find_n_open_ports,
            n=n_workers_in_host,
            workers=[workers.default],
            pure=False,
            allow_other_workers=False,
        )
    found_ports = client.gather(host_ports_futures)
    worker_to_port = {}
    for hostname, workers in host_to_workers.items():
        for worker, port in zip(workers.all_workers, found_ports[hostname]):
            worker_to_port[worker] = port
    return worker_to_port

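# Combined sketch for the two helpers above (addresses and ports are
# hypothetical):
#
#   >>> hosts = _group_workers_by_host(["tcp://10.0.0.1:8786", "tcp://10.0.0.1:8787"])
#   >>> _assign_open_ports_to_workers(client, hosts)
#   {'tcp://10.0.0.1:8786': 52701, 'tcp://10.0.0.1:8787': 52702}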

def _concat(seq: List[_DaskPart]) -> _DaskPart:
    if isinstance(seq[0], np.ndarray):
        return np.concatenate(seq, axis=0)
    elif isinstance(seq[0], (pd_DataFrame, pd_Series)):
        return concat(seq, axis=0)
    elif isinstance(seq[0], ss.spmatrix):
        return ss.vstack(seq, format='csr')
    else:
        raise TypeError(f'Data must be one of: numpy arrays, pandas dataframes, sparse matrices (from scipy). Got {type(seq[0]).__name__}.')

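# Sketch of expected behavior: parts of one type are stacked along axis 0.
#
#   >>> _concat([np.array([1, 2]), np.array([3])])
#   array([1, 2, 3])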

def _remove_list_padding(*args: Any) -> List[List[Any]]:
    return [[z for z in arg if z is not None] for arg in args]


def _pad_eval_names(lgbm_model: LGBMModel, required_names: List[str]) -> LGBMModel:
    """Append missing (key, value) pairs to a LightGBM model's evals_result_ and best_score_ OrderedDict attrs based on a set of required eval_set names.

    Allows users to rely on expected eval_set names being present when fitting DaskLGBM estimators with ``eval_set``.
    """
    for eval_name in required_names:
        if eval_name not in lgbm_model.evals_result_:
            lgbm_model.evals_result_[eval_name] = {}
        if eval_name not in lgbm_model.best_score_:
            lgbm_model.best_score_[eval_name] = {}

    return lgbm_model

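# Sketch (``model`` is a hypothetical fitted estimator): if fit() only produced
# results for 'valid_0' but two eval sets were expected, the missing name gets
# empty placeholders:
#
#   >>> padded = _pad_eval_names(model, required_names=['valid_0', 'valid_1'])
#   >>> padded.evals_result_['valid_1']
#   {}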

def _train_part(
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    list_of_parts: List[Dict[str, _DaskPart]],
    machines: str,
    local_listen_port: int,
    num_machines: int,
    return_model: bool,
    time_out: int = 120,
    **kwargs: Any
) -> Optional[LGBMModel]:
    network_params = {
        'machines': machines,
        'local_listen_port': local_listen_port,
        'time_out': time_out,
        'num_machines': num_machines
    }
    params.update(network_params)

    is_ranker = issubclass(model_factory, LGBMRanker)

    # Concatenate many parts into one
    data = _concat([x['data'] for x in list_of_parts])
    label = _concat([x['label'] for x in list_of_parts])

    if 'weight' in list_of_parts[0]:
        weight = _concat([x['weight'] for x in list_of_parts])
    else:
        weight = None

    if 'group' in list_of_parts[0]:
        group = _concat([x['group'] for x in list_of_parts])
    else:
        group = None

    if 'init_score' in list_of_parts[0]:
        init_score = _concat([x['init_score'] for x in list_of_parts])
    else:
        init_score = None

    # construct local eval_set data.
    n_evals = max(len(x.get('eval_set', [])) for x in list_of_parts)
    eval_names = kwargs.pop('eval_names', None)
    eval_class_weight = kwargs.get('eval_class_weight')
    local_eval_set = None
    local_eval_names = None
    local_eval_sample_weight = None
    local_eval_init_score = None
    local_eval_group = None

    if n_evals:
        has_eval_sample_weight = any(x.get('eval_sample_weight') is not None for x in list_of_parts)
        has_eval_init_score = any(x.get('eval_init_score') is not None for x in list_of_parts)

        local_eval_set = []
        evals_result_names = []
        if has_eval_sample_weight:
            local_eval_sample_weight = []
        if has_eval_init_score:
            local_eval_init_score = []
        if is_ranker:
            local_eval_group = []

        # store indices of eval_set components that were not contained within local parts.
        missing_eval_component_idx = []

        # consolidate parts of each individual eval component.
        for i in range(n_evals):
            x_e = []
            y_e = []
            w_e = []
            init_score_e = []
            g_e = []
            for part in list_of_parts:
                if not part.get('eval_set'):
                    continue

                # require that eval_name exists in evaluated result data in case dropped due to padding.
                # in distributed training the 'training' eval_set is not detected, will have name 'valid_<index>'.
                if eval_names:
                    evals_result_name = eval_names[i]
                else:
                    evals_result_name = f'valid_{i}'

                eval_set = part['eval_set'][i]
                if eval_set is _DatasetNames.TRAINSET:
                    x_e.append(part['data'])
                    y_e.append(part['label'])
                else:
                    x_e.extend(eval_set[0])
                    y_e.extend(eval_set[1])

                if evals_result_name not in evals_result_names:
                    evals_result_names.append(evals_result_name)

                eval_weight = part.get('eval_sample_weight')
                if eval_weight:
                    if eval_weight[i] is _DatasetNames.SAMPLE_WEIGHT:
                        w_e.append(part['weight'])
                    else:
                        w_e.extend(eval_weight[i])

                eval_init_score = part.get('eval_init_score')
                if eval_init_score:
                    if eval_init_score[i] is _DatasetNames.INIT_SCORE:
                        init_score_e.append(part['init_score'])
                    else:
                        init_score_e.extend(eval_init_score[i])

                eval_group = part.get('eval_group')
                if eval_group:
                    if eval_group[i] is _DatasetNames.GROUP:
                        g_e.append(part['group'])
                    else:
                        g_e.extend(eval_group[i])

            # filter padding from eval parts then _concat each eval_set component.
            x_e, y_e, w_e, init_score_e, g_e = _remove_list_padding(x_e, y_e, w_e, init_score_e, g_e)
            if x_e:
                local_eval_set.append((_concat(x_e), _concat(y_e)))
            else:
                missing_eval_component_idx.append(i)
                continue

            if w_e:
                local_eval_sample_weight.append(_concat(w_e))
            if init_score_e:
                local_eval_init_score.append(_concat(init_score_e))
            if g_e:
                local_eval_group.append(_concat(g_e))

        # reconstruct eval_set fit args/kwargs depending on which components of eval_set are on worker.
        eval_component_idx = [i for i in range(n_evals) if i not in missing_eval_component_idx]
        if eval_names:
            local_eval_names = [eval_names[i] for i in eval_component_idx]
        if eval_class_weight:
            kwargs['eval_class_weight'] = [eval_class_weight[i] for i in eval_component_idx]

    model = model_factory(**params)
    try:
        if is_ranker:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                group=group,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_group=local_eval_group,
                eval_names=local_eval_names,
                **kwargs
            )
        else:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_names=local_eval_names,
                **kwargs
            )

    finally:
        if getattr(model, "fitted_", False):
            model.booster_.free_network()

    if n_evals:
        # ensure that expected keys for evals_result_ and best_score_ exist regardless of padding.
        model = _pad_eval_names(model, required_names=evals_result_names)

    return model if return_model else None


def _split_to_parts(data: _DaskCollection, is_matrix: bool) -> List[_DaskPart]:
    parts = data.to_delayed()
    if isinstance(parts, np.ndarray):
        if is_matrix:
            assert parts.shape[1] == 1
        else:
            assert parts.ndim == 1 or parts.shape[1] == 1
        parts = parts.flatten().tolist()
    return parts

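# Sketch (hypothetical; assumes ``import dask.array as da``): an array with
# three chunks along axis 0 becomes a flat list of three delayed parts:
#
#   >>> darr = da.ones((30, 5), chunks=(10, 5))
#   >>> len(_split_to_parts(data=darr, is_matrix=True))
#   3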

def _machines_to_worker_map(machines: str, worker_addresses: Iterable[str]) -> Dict[str, int]:
    """Create a worker_map from machines list.

    Given ``machines`` and a list of Dask worker addresses, return a mapping where the keys are
    ``worker_addresses`` and the values are ports from ``machines``.

    Parameters
    ----------
    machines : str
        A comma-delimited list of workers, of the form ``ip1:port,ip2:port``.
    worker_addresses : list of str
        An iterable of Dask worker addresses, of the form ``{protocol}{hostname}:{port}``, where ``port`` is the port Dask's scheduler uses to talk to that worker.

    Returns
    -------
    result : Dict[str, int]
        Dictionary where keys are worker addresses in the form expected by Dask and values are a port for LightGBM to use.
    """
    machine_addresses = machines.split(",")

    if len(set(machine_addresses)) != len(machine_addresses):
        raise ValueError(f"Found duplicates in 'machines' ({machines}). Each entry in 'machines' must be a unique IP-port combination.")

    machine_to_port = defaultdict(set)
    for address in machine_addresses:
        host, port = address.split(":")
        machine_to_port[host].add(int(port))

    out = {}
    for address in worker_addresses:
        worker_host = urlparse(address).hostname
        if not worker_host:
            raise ValueError(f"Could not parse host name from worker address '{address}'")
        out[address] = machine_to_port[worker_host].pop()

    return out

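# Sketch (hypothetical addresses):
#
#   >>> _machines_to_worker_map(
#   ...     machines="10.0.0.1:12401,10.0.0.2:12402",
#   ...     worker_addresses=["tcp://10.0.0.1:8786", "tcp://10.0.0.2:8786"],
#   ... )
#   {'tcp://10.0.0.1:8786': 12401, 'tcp://10.0.0.2:8786': 12402}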

def _train(
    client: Client,
    data: _DaskMatrixLike,
    label: _DaskCollection,
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    sample_weight: Optional[_DaskVectorLike] = None,
    init_score: Optional[_DaskCollection] = None,
    group: Optional[_DaskVectorLike] = None,
    eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
    eval_names: Optional[List[str]] = None,
    eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
    eval_class_weight: Optional[List[Union[dict, str]]] = None,
    eval_init_score: Optional[List[_DaskCollection]] = None,
    eval_group: Optional[List[_DaskVectorLike]] = None,
    eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
    eval_at: Optional[Union[List[int], Tuple[int, ...]]] = None,
    **kwargs: Any
) -> LGBMModel:
    """Inner train routine.

    Parameters
    ----------
    client : dask.distributed.Client
        Dask client.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    label : Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]
        The target values (class labels in classification, real numbers in regression).
    params : dict
        Parameters passed to constructor of the local underlying model.
    model_factory : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Class of the local underlying model.
    sample_weight : Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)
        Weights of training data. Weights should be non-negative.
    init_score : Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)
        Init score of training data.
    group : Dask Array or Dask Series or None, optional (default=None)
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list of (X, y) tuples of Dask data collections, or None, optional (default=None)
        List of (X, y) tuple pairs to use as validation sets.
        Note that not all workers may receive chunks of every eval set within ``eval_set``. When the returned
        lightgbm estimator is not trained using any chunks of a particular eval set, its corresponding component
        of ``evals_result_`` and ``best_score_`` will be empty dictionaries.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : list of Dask Array or Dask Series, or None, optional (default=None)
        Weights for each validation set in eval_set. Weights should be non-negative.
    eval_class_weight : list of dict or str, or None, optional (default=None)
        Class weights, one dict or str for each validation set in eval_set.
    eval_init_score : list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)
        Initial model score for each validation set in eval_set.
    eval_group : list of Dask Array or Dask Series, or None, optional (default=None)
        Group/query for each validation set in eval_set.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the Dask model parameters (or inferred from the objective) will be evaluated and used as well.
        Default: 'l2' for DaskLGBMRegressor, 'binary(multi)_logloss' for DaskLGBMClassifier, 'ndcg' for DaskLGBMRanker.
    eval_at : list or tuple of int, optional (default=None)
        The evaluation positions of the specified ranking metric.
    **kwargs
        Other parameters passed to ``fit`` method of the local underlying model.

    Returns
    -------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Returns fitted underlying model.

    Note
    ----

    This method handles setting up the following network parameters based on information
    about the Dask cluster referenced by ``client``.

    * ``local_listen_port``: port that each LightGBM worker opens a listening socket on,
            to accept connections from other workers. This can differ from LightGBM worker
            to LightGBM worker, but does not have to.
    * ``machines``: a comma-delimited list of all workers in the cluster, in the
            form ``ip:port,ip:port``. If running multiple Dask workers on the same host, use different
            ports for each worker. For example, for ``LocalCluster(n_workers=3)``, you might
            pass ``"127.0.0.1:12400,127.0.0.1:12401,127.0.0.1:12402"``.
    * ``num_machines``: number of LightGBM workers.
    * ``timeout``: time in minutes to wait before closing unused sockets.

    The default behavior of this function is to generate ``machines`` from the list of
    Dask workers which hold some piece of the training data, and to search for an open
    port on each worker to be used as ``local_listen_port``.

    If ``machines`` is provided explicitly in ``params``, this function uses the hosts
    and ports in that list directly, and does not do any searching. This means that if
    any of the Dask workers are missing from the list or any of those ports are not free
    when training starts, training will fail.

    If ``local_listen_port`` is provided in ``params`` and ``machines`` is not, this function
    constructs ``machines`` from the list of Dask workers which hold some piece of the
    training data, assuming that each one will use the same ``local_listen_port``.
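
    For example (hypothetical addresses), the automatically generated network
    parameters might look like::

        machines="10.0.0.1:12401,10.0.0.2:12402"
        local_listen_port=12401  # on the first worker
        num_machines=2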
    """
    params = deepcopy(params)

    # capture whether local_listen_port or its aliases were provided
    listen_port_in_params = any(
        alias in params for alias in _ConfigAliases.get("local_listen_port")
    )

    # capture whether machines or its aliases were provided
    machines_in_params = any(
        alias in params for alias in _ConfigAliases.get("machines")
    )

    params = _choose_param_value(
        main_param_name="tree_learner",
        params=params,
        default_value="data"
    )
    allowed_tree_learners = {
        'data',
        'data_parallel',
        'feature',
        'feature_parallel',
        'voting',
        'voting_parallel'
    }
    if params["tree_learner"] not in allowed_tree_learners:
        _log_warning(f'Parameter tree_learner set to {params["tree_learner"]}, which is not allowed. Using "data" as default')
        params['tree_learner'] = 'data'

    # Some passed-in parameters can be removed:
    #   * 'num_machines': set automatically from Dask worker list
    #   * 'num_threads': overridden to match nthreads on each Dask process
    for param_alias in _ConfigAliases.get('num_machines', 'num_threads'):
        if param_alias in params:
            _log_warning(f"Parameter {param_alias} will be ignored.")
            params.pop(param_alias)

    # Split arrays/dataframes into parts. Arrange parts into dicts to enforce co-locality
    data_parts = _split_to_parts(data=data, is_matrix=True)
    label_parts = _split_to_parts(data=label, is_matrix=False)
    parts = [{'data': x, 'label': y} for (x, y) in zip(data_parts, label_parts)]
    n_parts = len(parts)

    if sample_weight is not None:
        weight_parts = _split_to_parts(data=sample_weight, is_matrix=False)
        for i in range(n_parts):
            parts[i]['weight'] = weight_parts[i]

    if group is not None:
        group_parts = _split_to_parts(data=group, is_matrix=False)
        for i in range(n_parts):
            parts[i]['group'] = group_parts[i]

    if init_score is not None:
        init_score_parts = _split_to_parts(data=init_score, is_matrix=False)
        for i in range(n_parts):
            parts[i]['init_score'] = init_score_parts[i]

    # eval_set will be re-constructed into smaller lists of (X, y) tuples, where
    # X and y are each delayed sub-lists of original eval dask Collections.
    if eval_set:
        # find maximum number of parts in an individual eval set so that we can
        # pad eval sets when they come in different sizes.
        n_largest_eval_parts = max(x[0].npartitions for x in eval_set)

        eval_sets = defaultdict(list)
        if eval_sample_weight:
            eval_sample_weights = defaultdict(list)
        if eval_group:
            eval_groups = defaultdict(list)
        if eval_init_score:
            eval_init_scores = defaultdict(list)

        for i, (X_eval, y_eval) in enumerate(eval_set):
            n_this_eval_parts = X_eval.npartitions

            # when individual eval set is equivalent to training data, skip recomputing parts.
            if X_eval is data and y_eval is label:
                for parts_idx in range(n_parts):
                    eval_sets[parts_idx].append(_DatasetNames.TRAINSET)
            else:
                eval_x_parts = _split_to_parts(data=X_eval, is_matrix=True)
                eval_y_parts = _split_to_parts(data=y_eval, is_matrix=False)
                for j in range(n_largest_eval_parts):
                    parts_idx = j % n_parts

                    # add None-padding for individual eval_set member if it is smaller than the largest member.
                    if j < n_this_eval_parts:
                        x_e = eval_x_parts[j]
                        y_e = eval_y_parts[j]
                    else:
                        x_e = None
                        y_e = None

                    if j < n_parts:
                        # first time a chunk of this eval set is added to this part.
                        eval_sets[parts_idx].append(([x_e], [y_e]))
                    else:
                        # append additional chunks of this eval set to this part.
                        eval_sets[parts_idx][-1][0].append(x_e)
                        eval_sets[parts_idx][-1][1].append(y_e)

            if eval_sample_weight:
                if eval_sample_weight[i] is sample_weight:
                    for parts_idx in range(n_parts):
                        eval_sample_weights[parts_idx].append(_DatasetNames.SAMPLE_WEIGHT)
                else:
                    eval_w_parts = _split_to_parts(data=eval_sample_weight[i], is_matrix=False)

                    # ensure that all evaluation parts map uniquely to one part.
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            w_e = eval_w_parts[j]
                        else:
                            w_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_sample_weights[parts_idx].append([w_e])
                        else:
                            eval_sample_weights[parts_idx][-1].append(w_e)

            if eval_init_score:
                if eval_init_score[i] is init_score:
                    for parts_idx in range(n_parts):
                        eval_init_scores[parts_idx].append(_DatasetNames.INIT_SCORE)
                else:
                    eval_init_score_parts = _split_to_parts(data=eval_init_score[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            init_score_e = eval_init_score_parts[j]
                        else:
                            init_score_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_init_scores[parts_idx].append([init_score_e])
                        else:
                            eval_init_scores[parts_idx][-1].append(init_score_e)

            if eval_group:
                if eval_group[i] is group:
                    for parts_idx in range(n_parts):
                        eval_groups[parts_idx].append(_DatasetNames.GROUP)
                else:
                    eval_g_parts = _split_to_parts(data=eval_group[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            g_e = eval_g_parts[j]
                        else:
                            g_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_groups[parts_idx].append([g_e])
                        else:
                            eval_groups[parts_idx][-1].append(g_e)

        # assign sub-eval_set components to worker parts.
        for parts_idx, e_set in eval_sets.items():
            parts[parts_idx]['eval_set'] = e_set
            if eval_sample_weight:
                parts[parts_idx]['eval_sample_weight'] = eval_sample_weights[parts_idx]
            if eval_init_score:
                parts[parts_idx]['eval_init_score'] = eval_init_scores[parts_idx]
            if eval_group:
                parts[parts_idx]['eval_group'] = eval_groups[parts_idx]

    # Start computation in the background
    parts = list(map(delayed, parts))
    parts = client.compute(parts)
    wait(parts)

    for part in parts:
        if part.status == 'error':  # type: ignore
            return part  # trigger error locally

    # Find locations of all parts and map them to particular Dask workers
    key_to_part_dict = {part.key: part for part in parts}  # type: ignore
    who_has = client.who_has(parts)
    worker_map = defaultdict(list)
    for key, workers in who_has.items():
        worker_map[next(iter(workers))].append(key_to_part_dict[key])

    # Check that all workers were provided some of eval_set. Otherwise warn user that validation
    # data artifacts may not be populated depending on worker returning final estimator.
    if eval_set:
        for worker in worker_map:
            has_eval_set = False
            for part in worker_map[worker]:
                if 'eval_set' in part.result():
                    has_eval_set = True
                    break

            if not has_eval_set:
                _log_warning(
                    f"Worker {worker} was not allocated eval_set data. Therefore evals_result_ and best_score_ data may be unreliable. "
                    "Try rebalancing data across workers."
                )

    # assign general validation set settings to fit kwargs.
    if eval_names:
        kwargs['eval_names'] = eval_names
    if eval_class_weight:
        kwargs['eval_class_weight'] = eval_class_weight
    if eval_metric:
        kwargs['eval_metric'] = eval_metric
    if eval_at:
        kwargs['eval_at'] = eval_at

    master_worker = next(iter(worker_map))
    worker_ncores = client.ncores()

    # resolve aliases for network parameters and pop the result off params.
    # these values are added back in calls to `_train_part()`
    params = _choose_param_value(
        main_param_name="local_listen_port",
        params=params,
        default_value=12400
    )
    local_listen_port = params.pop("local_listen_port")

    params = _choose_param_value(
        main_param_name="machines",
        params=params,
        default_value=None
    )
    machines = params.pop("machines")

    # figure out network params
    worker_addresses = worker_map.keys()
    if machines is not None:
        _log_info("Using passed-in 'machines' parameter")
        worker_address_to_port = _machines_to_worker_map(
            machines=machines,
            worker_addresses=worker_addresses
        )
    else:
        if listen_port_in_params:
            _log_info("Using passed-in 'local_listen_port' for all workers")
            unique_hosts = set(urlparse(a).hostname for a in worker_addresses)
            if len(unique_hosts) < len(worker_addresses):
                msg = (
                    "'local_listen_port' was provided in Dask training parameters, but at least one "
                    "machine in the cluster has multiple Dask worker processes running on it. Please omit "
                    "'local_listen_port' or pass 'machines'."
                )
                raise LightGBMError(msg)

            worker_address_to_port = {
                address: local_listen_port
                for address in worker_addresses
            }
        else:
            _log_info("Finding random open ports for workers")
            host_to_workers = _group_workers_by_host(worker_map.keys())
            worker_address_to_port = _assign_open_ports_to_workers(client, host_to_workers)

        machines = ','.join([
            f'{urlparse(worker_address).hostname}:{port}'
            for worker_address, port
            in worker_address_to_port.items()
        ])

    num_machines = len(worker_address_to_port)

    # Tell each worker to train on the parts that it has locally
    #
    # This code treats ``_train_part()`` calls as not "pure" because:
    #     1. there is randomness in the training process unless parameters ``seed``
    #        and ``deterministic`` are set
    #     2. even with those parameters set, the output of one ``_train_part()`` call
    #        relies on global state (it and all the other LightGBM training processes
    #        coordinate with each other)
    futures_classifiers = [
        client.submit(
            _train_part,
            model_factory=model_factory,
            params={**params, 'num_threads': worker_ncores[worker]},
            list_of_parts=list_of_parts,
            machines=machines,
            local_listen_port=worker_address_to_port[worker],
            num_machines=num_machines,
            time_out=params.get('time_out', 120),
            return_model=(worker == master_worker),
            workers=[worker],
            allow_other_workers=False,
            pure=False,
            **kwargs
        )
        for worker, list_of_parts in worker_map.items()
    ]

    results = client.gather(futures_classifiers)
    results = [v for v in results if v]
    model = results[0]

    # if network parameters were changed during training, remove them from the
    # returned model so that they're generated dynamically on every run based
    # on the Dask cluster you're connected to and which workers have pieces of
    # the training data
    if not listen_port_in_params:
        for param in _ConfigAliases.get('local_listen_port'):
            model._other_params.pop(param, None)

    if not machines_in_params:
        for param in _ConfigAliases.get('machines'):
            model._other_params.pop(param, None)

    for param in _ConfigAliases.get('num_machines', 'timeout'):
        model._other_params.pop(param, None)

    return model


def _predict_part(
    part: _DaskPart,
    model: LGBMModel,
    raw_score: bool,
    pred_proba: bool,
    pred_leaf: bool,
    pred_contrib: bool,
    **kwargs: Any
) -> _DaskPart:
    if part.shape[0] == 0:
        result = np.array([])
    elif pred_proba:
        result = model.predict_proba(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )
    else:
        result = model.predict(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )

    # dask.DataFrame.map_partitions() expects each call to return a pandas DataFrame or Series
    if isinstance(part, pd_DataFrame):
        if len(result.shape) == 2:
            result = pd_DataFrame(result, index=part.index)
        else:
            result = pd_Series(result, index=part.index, name='predictions')

    return result

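# Sketch (``model`` is a hypothetical fitted estimator): each partition is
# scored independently, so mapping the helper above over a Dask collection
# yields per-chunk predictions:
#
#   >>> chunk = np.zeros((4, model.n_features_))
#   >>> _predict_part(chunk, model=model, raw_score=False, pred_proba=False,
#   ...               pred_leaf=False, pred_contrib=False).shape
#   (4,)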

def _predict(
    model: LGBMModel,
    data: _DaskMatrixLike,
    client: Client,
    raw_score: bool = False,
    pred_proba: bool = False,
    pred_leaf: bool = False,
    pred_contrib: bool = False,
    dtype: _PredictionDtype = np.float32,
    **kwargs: Any
) -> Union[dask_Array, List[dask_Array]]:
    """Inner predict routine.

    Parameters
    ----------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Fitted underlying model.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    client : dask.distributed.Client
        Dask client.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    pred_proba : bool, optional (default=False)
        Should method return results of ``predict_proba`` (``pred_proba=True``) or ``predict`` (``pred_proba=False``).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.
    dtype : np.dtype, optional (default=np.float32)
        Dtype of the output.
    **kwargs
        Other parameters passed to ``predict`` or ``predict_proba`` method.

    Returns
    -------
    predicted_result : Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]
        The predicted values.
    X_leaves : Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
    if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
        raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')
    if isinstance(data, dask_DataFrame):
        return data.map_partitions(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        ).values
    elif isinstance(data, dask_Array):
        # for multi-class classification with sparse matrices, pred_contrib predictions
        # are returned as a list of sparse matrices (one per class)
        num_classes = model._n_classes or -1

        if (
            num_classes > 2
            and pred_contrib
            and isinstance(data._meta, ss.spmatrix)
        ):

            predict_function = partial(
                _predict_part,
                model=model,
                raw_score=False,
                pred_proba=pred_proba,
                pred_leaf=False,
                pred_contrib=True,
                **kwargs
            )

            delayed_chunks = data.to_delayed()
            bag = dask_bag_from_delayed(delayed_chunks[:, 0])

            @delayed
            def _extract(items: List[Any], i: int) -> Any:
                return items[i]

            preds = bag.map_partitions(predict_function)

            # pred_contrib output will have one column per feature,
            # plus one more for the base value
            num_cols = model.n_features_ + 1

            nrows_per_chunk = data.chunks[0]
            out: List[List[dask_Array]] = [[] for _ in range(num_classes)]

            # need to tell Dask the expected type and shape of individual preds
            pred_meta = data._meta

            for j, partition in enumerate(preds.to_delayed()):
                for i in range(num_classes):
                    part = dask_array_from_delayed(
                        value=_extract(partition, i),
                        shape=(nrows_per_chunk[j], num_cols),
                        meta=pred_meta
                    )
                    out[i].append(part)

            # by default, dask.array.concatenate() concatenates sparse arrays into a COO matrix
            # the code below is used instead to ensure that the sparse type is preserved during concatenation
            if isinstance(pred_meta, ss.csr_matrix):
                concat_fn = partial(ss.vstack, format='csr')
            elif isinstance(pred_meta, ss.csc_matrix):
                concat_fn = partial(ss.vstack, format='csc')
            else:
                concat_fn = ss.vstack

            # At this point, `out` is a list of lists of delayeds (each of which points to a matrix).
            # Concatenate them to return a list of Dask Arrays.
            out_arrays: List[dask_Array] = []
            for i in range(num_classes):
                out_arrays.append(
                    dask_array_from_delayed(
                        value=delayed(concat_fn)(out[i]),
                        shape=(data.shape[0], num_cols),
                        meta=pred_meta
                    )
                )

            return out_arrays

        data_row = client.compute(data[[0]]).result()
        predict_fn = partial(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs,
        )
        pred_row = predict_fn(data_row)
        chunks = (data.chunks[0],)
        map_blocks_kwargs = {}
        if len(pred_row.shape) > 1:
            chunks += (pred_row.shape[1],)
        else:
            map_blocks_kwargs['drop_axis'] = 1
        return data.map_blocks(
            predict_fn,
            chunks=chunks,
            meta=pred_row,
            dtype=dtype,
            **map_blocks_kwargs,
        )
    else:
        raise TypeError(f'Data must be either Dask Array or Dask DataFrame. Got {type(data).__name__}.')


class _DaskLGBMModel:

    @property
    def client_(self) -> Client:
        """:obj:`dask.distributed.Client`: Dask client.

        This property can be passed in the constructor or updated
        with ``model.set_params(client=client)``.
        """
        if not getattr(self, "fitted_", False):
            raise LGBMNotFittedError('Cannot access property client_ before calling fit().')

        return _get_dask_client(client=self.client)

    def _lgb_dask_getstate(self) -> Dict[Any, Any]:
        """Remove un-picklable attributes before serialization."""
        client = self.__dict__.pop("client", None)
        self._other_params.pop("client", None)
        out = deepcopy(self.__dict__)
        out.update({"client": None})
        self.client = client
        return out
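    # Pickling sketch: the Dask client is stripped by _lgb_dask_getstate(), so
    # a round trip like the following works even though Client objects are not
    # picklable (``dask_model`` is hypothetical):
    #
    #   >>> import pickle
    #   >>> restored = pickle.loads(pickle.dumps(dask_model))
    #   >>> restored.client is None
    #   True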

    def _lgb_dask_fit(
        self,
        model_factory: Type[LGBMModel],
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Optional[Union[List[int], Tuple[int, ...]]] = None,
        **kwargs: Any
    ) -> "_DaskLGBMModel":
        if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
            raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')

        params = self.get_params(True)
        params.pop("client", None)

        model = _train(
            client=_get_dask_client(self.client),
            data=X,
            label=y,
            params=params,
            model_factory=model_factory,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs
        )

        self.set_params(**model.get_params())
        self._lgb_dask_copy_extra_params(model, self)

        return self

    def _lgb_dask_to_local(self, model_factory: Type[LGBMModel]) -> LGBMModel:
        params = self.get_params()
        params.pop("client", None)
        model = model_factory(**params)
        self._lgb_dask_copy_extra_params(self, model)
        model._other_params.pop("client", None)
        return model

    @staticmethod
    def _lgb_dask_copy_extra_params(source: Union["_DaskLGBMModel", LGBMModel], dest: Union["_DaskLGBMModel", LGBMModel]) -> None:
        params = source.get_params()
        attributes = source.__dict__
        extra_param_names = set(attributes.keys()).difference(params.keys())
        for name in extra_param_names:
            setattr(dest, name, attributes[name])


class DaskLGBMClassifier(LGBMClassifier, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMClassifier."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMClassifier.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMClassifier.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        **kwargs: Any
    ) -> "DaskLGBMClassifier":
        """Docstring is inherited from the lightgbm.LGBMClassifier.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMClassifier,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMClassifier does not support group, eval_group.
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('eval_set :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_group :')]
                 + _base_doc[_base_doc.find('eval_metric :'):])

    # DaskLGBMClassifier support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMClassifier.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMClassifier
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            dtype=self.classes_.dtype,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]"
    )

    def predict_proba(
        self,
        X: _DaskMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict_proba."""
        return _predict(
            model=self.to_local(),
            data=X,
            pred_proba=True,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]"
    )
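
    # Sketch contrasting the two prediction methods (illustrative names from
    # the sketches above):
    #
    #   labels = clf.predict(dX)        # shape = [n_samples]
    #   proba = clf.predict_proba(dX)   # shape = [n_samples, n_classes]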

    def to_local(self) -> LGBMClassifier:
        """Create a regular version of lightgbm.LGBMClassifier from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMClassifier
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMClassifier)
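
    # A sketch of handing the trained model off for non-Dask use; the file
    # path and ``clf`` are illustrative:
    #
    #   import joblib
    #   local_clf = clf.to_local()         # plain lightgbm.LGBMClassifier
    #   joblib.dump(local_clf, 'clf.pkl')  # can be served without a cluster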


class DaskLGBMRegressor(LGBMRegressor, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRegressor."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRegressor.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMRegressor.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()
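
    # Because serialization goes through ``_lgb_dask_getstate``, the attached
    # client is dropped from the pickled state; a sketch (illustrative names):
    #
    #   import pickle
    #   payload = pickle.dumps(reg)    # the Dask client is not serialized
    #   reg2 = pickle.loads(payload)   # reg2.client is None
    #   reg2.predict(dX)               # falls back to default_client()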

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        **kwargs: Any
    ) -> "DaskLGBMRegressor":
        """Docstring is inherited from the lightgbm.LGBMRegressor.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMRegressor,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMRegressor does not support group, eval_class_weight, eval_group.
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('eval_set :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_group :')]
                 + _base_doc[_base_doc.find('eval_metric :'):])

    # DaskLGBMRegressor support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRegressor.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRegressor
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRegressor.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )
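
    # Sketch of the SHAP-style output documented above (illustrative names):
    #
    #   contribs = reg.predict(dX, pred_contrib=True)
    #   # dask Array of shape [n_samples, n_features + 1]; the final column
    #   # holds the expected value (the model's base prediction)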

    def to_local(self) -> LGBMRegressor:
        """Create a regular version of lightgbm.LGBMRegressor from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRegressor
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRegressor)


class DaskLGBMRanker(LGBMRanker, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRanker."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: Optional[int] = None,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRanker.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMRanker.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(  # type: ignore[override]
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
        eval_at: Union[List[int], Tuple[int, ...]] = (1, 2, 3, 4, 5),
        **kwargs: Any
    ) -> "DaskLGBMRanker":
        """Docstring is inherited from the lightgbm.LGBMRanker.fit."""
        self._lgb_dask_fit(
            model_factory=LGBMRanker,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs
        )
        return self

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMRanker does not support eval_class_weight or early stopping
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])

    _base_doc = (_base_doc[:_base_doc.find('feature_name :')]
                 + "eval_at : list or tuple of int, optional (default=(1, 2, 3, 4, 5))\n"
                 + f"{' ':8}The evaluation positions of the specified metric.\n"
                 + f"{' ':4}{_base_doc[_base_doc.find('feature_name :'):]}")

    # DaskLGBMRanker support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRanker.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRanker
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(
        self,
        X: _DaskMatrixLike,
        raw_score: bool = False,
        start_iteration: int = 0,
        num_iteration: Optional[int] = None,
        pred_leaf: bool = False,
        pred_contrib: bool = False,
        validate_features: bool = False,
        **kwargs: Any
    ) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRanker.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            raw_score=raw_score,
            start_iteration=start_iteration,
            num_iteration=num_iteration,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            validate_features=validate_features,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )
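
    # Note on interpreting the output: ``predict`` returns one relevance
    # score per row, so a per-query ranking comes from sorting each query's
    # rows by score. A sketch, reusing the illustrative ranking data above:
    #
    #   scores = rnk.predict(dX).compute()
    #   first_query = np.argsort(-scores[:10])  # descending within query 1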

    def to_local(self) -> LGBMRanker:
        """Create a regular version of lightgbm.LGBMRanker from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRanker
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRanker)