# coding: utf-8
"""Distributed training with LightGBM and dask.distributed.

This module enables you to perform distributed training with LightGBM on
dask.Array and dask.DataFrame collections.

It is based on dask-lightgbm, which was based on dask-xgboost.
"""
import socket
from collections import defaultdict, namedtuple
from copy import deepcopy
from enum import Enum, auto
from functools import partial
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union
from urllib.parse import urlparse

import numpy as np
import scipy.sparse as ss

from .basic import _LIB, LightGBMError, _choose_param_value, _ConfigAliases, _log_info, _log_warning, _safe_call
from .compat import (DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED, Client, LGBMNotFittedError, concat,
                     dask_Array, dask_array_from_delayed, dask_bag_from_delayed, dask_DataFrame, dask_Series,
                     default_client, delayed, pd_DataFrame, pd_Series, wait)
from .sklearn import (LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor, _LGBM_ScikitCustomEvalFunction,
                      _LGBM_ScikitCustomObjectiveFunction, _lgbmmodel_doc_custom_eval_note, _lgbmmodel_doc_fit,
                      _lgbmmodel_doc_predict)

_DaskCollection = Union[dask_Array, dask_DataFrame, dask_Series]
_DaskMatrixLike = Union[dask_Array, dask_DataFrame]
_DaskVectorLike = Union[dask_Array, dask_Series]
_DaskPart = Union[np.ndarray, pd_DataFrame, pd_Series, ss.spmatrix]
_PredictionDtype = Union[Type[np.float32], Type[np.float64], Type[np.int32], Type[np.int64]]

_HostWorkers = namedtuple('_HostWorkers', ['default', 'all'])


class _DatasetNames(Enum):
    """Placeholder names used by lightgbm.dask internals to say 'also evaluate the training data'.

    Avoid duplicating the training data when the validation set refers to elements of training data.
    """

    TRAINSET = auto()
    SAMPLE_WEIGHT = auto()
    INIT_SCORE = auto()
    GROUP = auto()
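
    # Illustrative sketch (for exposition only, not part of the enum): a worker's
    # part dict might hold {'data': X_chunk, 'label': y_chunk, 'eval_set': [_DatasetNames.TRAINSET]},
    # meaning "validate on this worker's own training chunk" without shipping a second copy of it.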


def _get_dask_client(client: Optional[Client]) -> Client:
    """Choose a Dask client to use.

    Parameters
    ----------
    client : dask.distributed.Client or None
        Dask client.

    Returns
    -------
    client : dask.distributed.Client
        A Dask client.
    """
    if client is None:
        return default_client()
    else:
        return client


def _find_n_open_ports(n: int) -> List[int]:
    """Find n random open ports on localhost.

    Returns
    -------
    ports : list of int
        n random open ports on localhost.
    """
    sockets = []
    for _ in range(n):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))
        sockets.append(s)
    ports = []
    for s in sockets:
        ports.append(s.getsockname()[1])
        s.close()
    return ports
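
# Illustrative example (port numbers are hypothetical):
#
#     _find_n_open_ports(n=2)  # -> [49672, 49673]
#
# All sockets are kept open until every port has been chosen, so one call
# never hands out the same port twice.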


def _group_workers_by_host(worker_addresses: Iterable[str]) -> Dict[str, _HostWorkers]:
    """Group all worker addresses by hostname.

    Returns
    -------
    host_to_workers : dict
        mapping from hostname to all its workers.
    """
    host_to_workers: Dict[str, _HostWorkers] = {}
    for address in worker_addresses:
        hostname = urlparse(address).hostname
        if not hostname:
            raise ValueError(f"Could not parse host name from worker address '{address}'")
        if hostname not in host_to_workers:
            host_to_workers[hostname] = _HostWorkers(default=address, all=[address])
        else:
            host_to_workers[hostname].all.append(address)
    return host_to_workers
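
# Illustrative example (addresses are hypothetical):
#
#     _group_workers_by_host(['tcp://10.0.0.1:36785', 'tcp://10.0.0.1:40577', 'tcp://10.0.0.2:36785'])
#     # -> {'10.0.0.1': _HostWorkers(default='tcp://10.0.0.1:36785',
#     #                              all=['tcp://10.0.0.1:36785', 'tcp://10.0.0.1:40577']),
#     #     '10.0.0.2': _HostWorkers(default='tcp://10.0.0.2:36785', all=['tcp://10.0.0.2:36785'])}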


def _assign_open_ports_to_workers(
    client: Client,
    host_to_workers: Dict[str, _HostWorkers]
) -> Dict[str, int]:
    """Assign an open port to each worker.

    Returns
    -------
    worker_to_port: dict
        mapping from worker address to an open port.
    """
    host_ports_futures = {}
    for hostname, workers in host_to_workers.items():
        n_workers_in_host = len(workers.all)
        host_ports_futures[hostname] = client.submit(
            _find_n_open_ports,
            n=n_workers_in_host,
            workers=[workers.default],
            pure=False,
            allow_other_workers=False,
        )
    found_ports = client.gather(host_ports_futures)
    worker_to_port = {}
    for hostname, workers in host_to_workers.items():
        for worker, port in zip(workers.all, found_ports[hostname]):
            worker_to_port[worker] = port
    return worker_to_port
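
# Illustrative note: continuing the hypothetical cluster above, each host runs
# _find_n_open_ports() once (on its default worker) and the ports found there
# are zipped onto that host's workers, e.g.
#
#     {'tcp://10.0.0.1:36785': 49672, 'tcp://10.0.0.1:40577': 49673, 'tcp://10.0.0.2:36785': 49672}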


def _concat(seq: List[_DaskPart]) -> _DaskPart:
    if isinstance(seq[0], np.ndarray):
        return np.concatenate(seq, axis=0)
    elif isinstance(seq[0], (pd_DataFrame, pd_Series)):
        return concat(seq, axis=0)
    elif isinstance(seq[0], ss.spmatrix):
        return ss.vstack(seq, format='csr')
    else:
        raise TypeError(f'Data must be one of: numpy arrays, pandas dataframes, sparse matrices (from scipy). Got {type(seq[0]).__name__}.')
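
# Illustrative example:
#
#     _concat([np.array([1, 2]), np.array([3])])  # -> array([1, 2, 3])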


def _remove_list_padding(*args: Any) -> List[List[Any]]:
    return [[z for z in arg if z is not None] for arg in args]


def _pad_eval_names(lgbm_model: LGBMModel, required_names: List[str]) -> LGBMModel:
    """Append missing (key, value) pairs to a LightGBM model's evals_result_ and best_score_ OrderedDict attrs based on a set of required eval_set names.

    Allows users to rely on expected eval_set names being present when fitting DaskLGBM estimators with ``eval_set``.
    """
    not_evaluated = 'not evaluated'
    for eval_name in required_names:
        if eval_name not in lgbm_model.evals_result_:
            lgbm_model.evals_result_[eval_name] = not_evaluated
        if eval_name not in lgbm_model.best_score_:
            lgbm_model.best_score_[eval_name] = not_evaluated

    return lgbm_model
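
# For example, if required_names=['training', 'valid_1'] but this worker never saw
# chunks of the second eval set, evals_result_['valid_1'] and best_score_['valid_1']
# are filled with the placeholder string 'not evaluated'.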


def _train_part(
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    list_of_parts: List[Dict[str, _DaskPart]],
    machines: str,
    local_listen_port: int,
    num_machines: int,
    return_model: bool,
    time_out: int = 120,
    **kwargs: Any
) -> Optional[LGBMModel]:
    network_params = {
        'machines': machines,
        'local_listen_port': local_listen_port,
        'time_out': time_out,
        'num_machines': num_machines
    }
    params.update(network_params)

    is_ranker = issubclass(model_factory, LGBMRanker)

    # Concatenate many parts into one
    data = _concat([x['data'] for x in list_of_parts])
    label = _concat([x['label'] for x in list_of_parts])

    if 'weight' in list_of_parts[0]:
        weight = _concat([x['weight'] for x in list_of_parts])
    else:
        weight = None

    if 'group' in list_of_parts[0]:
        group = _concat([x['group'] for x in list_of_parts])
    else:
        group = None

    if 'init_score' in list_of_parts[0]:
        init_score = _concat([x['init_score'] for x in list_of_parts])
    else:
        init_score = None

    # construct local eval_set data.
    n_evals = max(len(x.get('eval_set', [])) for x in list_of_parts)
    eval_names = kwargs.pop('eval_names', None)
    eval_class_weight = kwargs.get('eval_class_weight')
    local_eval_set = None
    local_eval_names = None
    local_eval_sample_weight = None
    local_eval_init_score = None
    local_eval_group = None

    if n_evals:
        has_eval_sample_weight = any(x.get('eval_sample_weight') is not None for x in list_of_parts)
        has_eval_init_score = any(x.get('eval_init_score') is not None for x in list_of_parts)

        local_eval_set = []
        evals_result_names = []
        if has_eval_sample_weight:
            local_eval_sample_weight = []
        if has_eval_init_score:
            local_eval_init_score = []
        if is_ranker:
            local_eval_group = []

        # store indices of eval_set components that were not contained within local parts.
        missing_eval_component_idx = []

        # consolidate parts of each individual eval component.
        for i in range(n_evals):
            x_e = []
            y_e = []
            w_e = []
            init_score_e = []
            g_e = []
            for part in list_of_parts:
                if not part.get('eval_set'):
                    continue

                # require that eval_name exists in evaluated result data in case dropped due to padding.
                # in distributed training the 'training' eval_set is not detected; it will have the name 'valid_<index>'.
                if eval_names:
                    evals_result_name = eval_names[i]
                else:
                    evals_result_name = f'valid_{i}'

                eval_set = part['eval_set'][i]
                if eval_set is _DatasetNames.TRAINSET:
                    x_e.append(part['data'])
                    y_e.append(part['label'])
                else:
                    x_e.extend(eval_set[0])
                    y_e.extend(eval_set[1])

                if evals_result_name not in evals_result_names:
                    evals_result_names.append(evals_result_name)

                eval_weight = part.get('eval_sample_weight')
                if eval_weight:
                    if eval_weight[i] is _DatasetNames.SAMPLE_WEIGHT:
                        w_e.append(part['weight'])
                    else:
                        w_e.extend(eval_weight[i])

                eval_init_score = part.get('eval_init_score')
                if eval_init_score:
                    if eval_init_score[i] is _DatasetNames.INIT_SCORE:
                        init_score_e.append(part['init_score'])
                    else:
                        init_score_e.extend(eval_init_score[i])

                eval_group = part.get('eval_group')
                if eval_group:
                    if eval_group[i] is _DatasetNames.GROUP:
                        g_e.append(part['group'])
                    else:
                        g_e.extend(eval_group[i])

            # filter padding from eval parts then _concat each eval_set component.
            x_e, y_e, w_e, init_score_e, g_e = _remove_list_padding(x_e, y_e, w_e, init_score_e, g_e)
            if x_e:
                local_eval_set.append((_concat(x_e), _concat(y_e)))
            else:
                missing_eval_component_idx.append(i)
                continue

            if w_e:
                local_eval_sample_weight.append(_concat(w_e))
            if init_score_e:
                local_eval_init_score.append(_concat(init_score_e))
            if g_e:
                local_eval_group.append(_concat(g_e))

        # reconstruct eval_set fit args/kwargs depending on which components of eval_set are on worker.
        eval_component_idx = [i for i in range(n_evals) if i not in missing_eval_component_idx]
        if eval_names:
            local_eval_names = [eval_names[i] for i in eval_component_idx]
        if eval_class_weight:
            kwargs['eval_class_weight'] = [eval_class_weight[i] for i in eval_component_idx]

    try:
        model = model_factory(**params)
        if is_ranker:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                group=group,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_group=local_eval_group,
                eval_names=local_eval_names,
                **kwargs
            )
        else:
            model.fit(
                data,
                label,
                sample_weight=weight,
                init_score=init_score,
                eval_set=local_eval_set,
                eval_sample_weight=local_eval_sample_weight,
                eval_init_score=local_eval_init_score,
                eval_names=local_eval_names,
                **kwargs
            )

    finally:
        _safe_call(_LIB.LGBM_NetworkFree())

    if n_evals:
        # ensure that expected keys for evals_result_ and best_score_ exist regardless of padding.
        model = _pad_eval_names(model, required_names=evals_result_names)

    return model if return_model else None


def _split_to_parts(data: _DaskCollection, is_matrix: bool) -> List[_DaskPart]:
    parts = data.to_delayed()
    if isinstance(parts, np.ndarray):
        if is_matrix:
            assert parts.shape[1] == 1
        else:
            assert parts.ndim == 1 or parts.shape[1] == 1
        parts = parts.flatten().tolist()
    return parts


def _machines_to_worker_map(machines: str, worker_addresses: Iterable[str]) -> Dict[str, int]:
    """Create a worker_map from machines list.

    Given ``machines`` and a list of Dask worker addresses, return a mapping where the keys are
    ``worker_addresses`` and the values are ports from ``machines``.

    Parameters
    ----------
    machines : str
        A comma-delimited list of workers, of the form ``ip1:port,ip2:port``.
    worker_addresses : list of str
        An iterable of Dask worker addresses, of the form ``{protocol}{hostname}:{port}``, where ``port`` is the port Dask's scheduler uses to talk to that worker.

    Returns
    -------
    result : Dict[str, int]
        Dictionary where keys are worker addresses in the form expected by Dask and values are a port for LightGBM to use.
    """
    machine_addresses = machines.split(",")

    if len(set(machine_addresses)) != len(machine_addresses):
        raise ValueError(f"Found duplicates in 'machines' ({machines}). Each entry in 'machines' must be a unique IP-port combination.")

    machine_to_port = defaultdict(set)
    for address in machine_addresses:
        host, port = address.split(":")
        machine_to_port[host].add(int(port))

    out = {}
    for address in worker_addresses:
        worker_host = urlparse(address).hostname
        if not worker_host:
            raise ValueError(f"Could not parse host name from worker address '{address}'")
        out[address] = machine_to_port[worker_host].pop()

    return out
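
# Illustrative example (addresses are hypothetical):
#
#     _machines_to_worker_map(
#         machines='10.0.0.1:12400,10.0.0.1:12401,10.0.0.2:12400',
#         worker_addresses=['tcp://10.0.0.1:40000', 'tcp://10.0.0.1:40001', 'tcp://10.0.0.2:40000']
#     )
#     # -> {'tcp://10.0.0.1:40000': 12400, 'tcp://10.0.0.1:40001': 12401, 'tcp://10.0.0.2:40000': 12400}
#     # (which of a host's ports goes to which of its workers is arbitrary: set.pop() is unordered)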


def _train(
    client: Client,
    data: _DaskMatrixLike,
    label: _DaskCollection,
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    sample_weight: Optional[_DaskVectorLike] = None,
    init_score: Optional[_DaskCollection] = None,
    group: Optional[_DaskVectorLike] = None,
    eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
    eval_names: Optional[List[str]] = None,
    eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
    eval_class_weight: Optional[List[Union[dict, str]]] = None,
    eval_init_score: Optional[List[_DaskCollection]] = None,
    eval_group: Optional[List[_DaskVectorLike]] = None,
    eval_metric: Optional[Union[_LGBM_ScikitCustomEvalFunction, str, List[Union[_LGBM_ScikitCustomEvalFunction, str]]]] = None,
    eval_at: Optional[Iterable[int]] = None,
    **kwargs: Any
) -> LGBMModel:
    """Inner train routine.

    Parameters
    ----------
    client : dask.distributed.Client
        Dask client.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    label : Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]
        The target values (class labels in classification, real numbers in regression).
    params : dict
        Parameters passed to constructor of the local underlying model.
    model_factory : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Class of the local underlying model.
    sample_weight : Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)
        Weights of training data. Weights should be non-negative.
    init_score : Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)
        Init score of training data.
    group : Dask Array or Dask Series or None, optional (default=None)
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    eval_set : list of (X, y) tuples of Dask data collections, or None, optional (default=None)
        List of (X, y) tuple pairs to use as validation sets.
        Note, that not all workers may receive chunks of every eval set within ``eval_set``. When the returned
        lightgbm estimator is not trained using any chunks of a particular eval set, its corresponding component
        of evals_result_ and best_score_ will be 'not evaluated'.
    eval_names : list of str, or None, optional (default=None)
        Names of eval_set.
    eval_sample_weight : list of Dask Array or Dask Series, or None, optional (default=None)
        Weights for each validation set in eval_set. Weights should be non-negative.
    eval_class_weight : list of dict or str, or None, optional (default=None)
        Class weights, one dict or str for each validation set in eval_set.
    eval_init_score : list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)
        Initial model score for each validation set in eval_set.
    eval_group : list of Dask Array or Dask Series, or None, optional (default=None)
        Group/query for each validation set in eval_set.
    eval_metric : str, callable, list or None, optional (default=None)
        If str, it should be a built-in evaluation metric to use.
        If callable, it should be a custom evaluation metric, see note below for more details.
        If list, it can be a list of built-in metrics, a list of custom evaluation metrics, or a mix of both.
        In either case, the ``metric`` from the Dask model parameters (or inferred from the objective) will be evaluated and used as well.
        Default: 'l2' for DaskLGBMRegressor, 'binary(multi)_logloss' for DaskLGBMClassifier, 'ndcg' for DaskLGBMRanker.
    eval_at : iterable of int, optional (default=None)
        The evaluation positions of the specified ranking metric.
    **kwargs
        Other parameters passed to ``fit`` method of the local underlying model.

    Returns
    -------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Returns fitted underlying model.

    Note
    ----

    This method handles setting up the following network parameters based on information
    about the Dask cluster referenced by ``client``.

    * ``local_listen_port``: port that each LightGBM worker opens a listening socket on,
            to accept connections from other workers. This can differ from LightGBM worker
            to LightGBM worker, but does not have to.
    * ``machines``: a comma-delimited list of all workers in the cluster, in the
            form ``ip:port,ip:port``. If running multiple Dask workers on the same host, use different
            ports for each worker. For example, for ``LocalCluster(n_workers=3)``, you might
            pass ``"127.0.0.1:12400,127.0.0.1:12401,127.0.0.1:12402"``.
    * ``num_machines``: number of LightGBM workers.
    * ``timeout``: time in minutes to wait before closing unused sockets.

    The default behavior of this function is to generate ``machines`` from the list of
    Dask workers which hold some piece of the training data, and to search for an open
    port on each worker to be used as ``local_listen_port``.

    If ``machines`` is provided explicitly in ``params``, this function uses the hosts
    and ports in that list directly, and does not do any searching. This means that if
    any of the Dask workers are missing from the list or any of those ports are not free
    when training starts, training will fail.

    If ``local_listen_port`` is provided in ``params`` and ``machines`` is not, this function
    constructs ``machines`` from the list of Dask workers which hold some piece of the
    training data, assuming that each one will use the same ``local_listen_port``.
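
    For example, with one Dask worker on each of two hosts (hypothetical addresses),
    passing ``params={'machines': '10.0.0.1:12400,10.0.0.2:12400'}`` uses those ports
    directly and skips the open-port search entirely.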
    """
    params = deepcopy(params)

    # capture whether local_listen_port or its aliases were provided
    listen_port_in_params = any(
        alias in params for alias in _ConfigAliases.get("local_listen_port")
    )

    # capture whether machines or its aliases were provided
    machines_in_params = any(
        alias in params for alias in _ConfigAliases.get("machines")
    )

    params = _choose_param_value(
        main_param_name="tree_learner",
        params=params,
        default_value="data"
    )
    allowed_tree_learners = {
        'data',
        'data_parallel',
        'feature',
        'feature_parallel',
        'voting',
        'voting_parallel'
    }
    if params["tree_learner"] not in allowed_tree_learners:
        _log_warning(f'Parameter tree_learner set to {params["tree_learner"]}, which is not allowed. Using "data" as default')
        params['tree_learner'] = 'data'

    # Some passed-in parameters can be removed:
    #   * 'num_machines': set automatically from Dask worker list
    #   * 'num_threads': overridden to match nthreads on each Dask process
    for param_alias in _ConfigAliases.get('num_machines', 'num_threads'):
        if param_alias in params:
            _log_warning(f"Parameter {param_alias} will be ignored.")
            params.pop(param_alias)

    # Split arrays/dataframes into parts. Arrange parts into dicts to enforce co-locality
    data_parts = _split_to_parts(data=data, is_matrix=True)
    label_parts = _split_to_parts(data=label, is_matrix=False)
    parts = [{'data': x, 'label': y} for (x, y) in zip(data_parts, label_parts)]
    n_parts = len(parts)

    if sample_weight is not None:
        weight_parts = _split_to_parts(data=sample_weight, is_matrix=False)
        for i in range(n_parts):
            parts[i]['weight'] = weight_parts[i]

    if group is not None:
        group_parts = _split_to_parts(data=group, is_matrix=False)
        for i in range(n_parts):
            parts[i]['group'] = group_parts[i]

    if init_score is not None:
        init_score_parts = _split_to_parts(data=init_score, is_matrix=False)
        for i in range(n_parts):
            parts[i]['init_score'] = init_score_parts[i]

    # eval_set will be re-constructed into smaller lists of (X, y) tuples, where
    # X and y are each delayed sub-lists of the original eval Dask collections.
    if eval_set:
        # find maximum number of parts in an individual eval set so that we can
        # pad eval sets when they come in different sizes.
        n_largest_eval_parts = max(x[0].npartitions for x in eval_set)

        eval_sets = defaultdict(list)
        if eval_sample_weight:
            eval_sample_weights = defaultdict(list)
        if eval_group:
            eval_groups = defaultdict(list)
        if eval_init_score:
            eval_init_scores = defaultdict(list)

        for i, (X_eval, y_eval) in enumerate(eval_set):
            n_this_eval_parts = X_eval.npartitions

            # when individual eval set is equivalent to training data, skip recomputing parts.
            if X_eval is data and y_eval is label:
                for parts_idx in range(n_parts):
                    eval_sets[parts_idx].append(_DatasetNames.TRAINSET)
            else:
                eval_x_parts = _split_to_parts(data=X_eval, is_matrix=True)
                eval_y_parts = _split_to_parts(data=y_eval, is_matrix=False)
                for j in range(n_largest_eval_parts):
                    parts_idx = j % n_parts

                    # add None-padding for individual eval_set member if it is smaller than the largest member.
                    if j < n_this_eval_parts:
                        x_e = eval_x_parts[j]
                        y_e = eval_y_parts[j]
                    else:
                        x_e = None
                        y_e = None

                    if j < n_parts:
                        # first time a chunk of this eval set is added to this part.
                        eval_sets[parts_idx].append(([x_e], [y_e]))
                    else:
                        # append additional chunks of this eval set to this part.
                        eval_sets[parts_idx][-1][0].append(x_e)
                        eval_sets[parts_idx][-1][1].append(y_e)

            if eval_sample_weight:
                if eval_sample_weight[i] is sample_weight:
                    for parts_idx in range(n_parts):
                        eval_sample_weights[parts_idx].append(_DatasetNames.SAMPLE_WEIGHT)
                else:
                    eval_w_parts = _split_to_parts(data=eval_sample_weight[i], is_matrix=False)

                    # ensure that all evaluation parts map uniquely to one part.
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            w_e = eval_w_parts[j]
                        else:
                            w_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_sample_weights[parts_idx].append([w_e])
                        else:
                            eval_sample_weights[parts_idx][-1].append(w_e)

            if eval_init_score:
                if eval_init_score[i] is init_score:
                    for parts_idx in range(n_parts):
                        eval_init_scores[parts_idx].append(_DatasetNames.INIT_SCORE)
                else:
                    eval_init_score_parts = _split_to_parts(data=eval_init_score[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            init_score_e = eval_init_score_parts[j]
                        else:
                            init_score_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_init_scores[parts_idx].append([init_score_e])
                        else:
                            eval_init_scores[parts_idx][-1].append(init_score_e)

            if eval_group:
                if eval_group[i] is group:
                    for parts_idx in range(n_parts):
                        eval_groups[parts_idx].append(_DatasetNames.GROUP)
                else:
                    eval_g_parts = _split_to_parts(data=eval_group[i], is_matrix=False)
                    for j in range(n_largest_eval_parts):
                        if j < n_this_eval_parts:
                            g_e = eval_g_parts[j]
                        else:
                            g_e = None

                        parts_idx = j % n_parts
                        if j < n_parts:
                            eval_groups[parts_idx].append([g_e])
                        else:
                            eval_groups[parts_idx][-1].append(g_e)

        # assign sub-eval_set components to worker parts.
        for parts_idx, e_set in eval_sets.items():
            parts[parts_idx]['eval_set'] = e_set
            if eval_sample_weight:
                parts[parts_idx]['eval_sample_weight'] = eval_sample_weights[parts_idx]
            if eval_init_score:
                parts[parts_idx]['eval_init_score'] = eval_init_scores[parts_idx]
            if eval_group:
                parts[parts_idx]['eval_group'] = eval_groups[parts_idx]

    # Start computation in the background
    parts = list(map(delayed, parts))
    parts = client.compute(parts)
    wait(parts)

    for part in parts:
        if part.status == 'error':  # type: ignore
            return part  # trigger error locally

    # Find locations of all parts and map them to particular Dask workers
    key_to_part_dict = {part.key: part for part in parts}  # type: ignore
    who_has = client.who_has(parts)
    worker_map = defaultdict(list)
    for key, workers in who_has.items():
        worker_map[next(iter(workers))].append(key_to_part_dict[key])

    # Check that all workers were given some of eval_set. Otherwise, warn the user that
    # validation data artifacts may not be populated, depending on which worker returns the final estimator.
    if eval_set:
        for worker in worker_map:
            has_eval_set = False
            for part in worker_map[worker]:
                if 'eval_set' in part.result():
                    has_eval_set = True
                    break

            if not has_eval_set:
                _log_warning(
                    f"Worker {worker} was not allocated eval_set data. Therefore evals_result_ and best_score_ data may be unreliable. "
                    "Try rebalancing data across workers."
                )

    # assign general validation set settings to fit kwargs.
    if eval_names:
        kwargs['eval_names'] = eval_names
    if eval_class_weight:
        kwargs['eval_class_weight'] = eval_class_weight
    if eval_metric:
        kwargs['eval_metric'] = eval_metric
    if eval_at:
        kwargs['eval_at'] = eval_at

    master_worker = next(iter(worker_map))
    worker_ncores = client.ncores()

    # resolve aliases for network parameters and pop the result off params.
    # these values are added back in calls to `_train_part()`
    params = _choose_param_value(
        main_param_name="local_listen_port",
        params=params,
        default_value=12400
    )
    local_listen_port = params.pop("local_listen_port")

    params = _choose_param_value(
        main_param_name="machines",
        params=params,
        default_value=None
    )
    machines = params.pop("machines")

    # figure out network params
    worker_addresses = worker_map.keys()
    if machines is not None:
        _log_info("Using passed-in 'machines' parameter")
        worker_address_to_port = _machines_to_worker_map(
            machines=machines,
            worker_addresses=worker_addresses
        )
    else:
        if listen_port_in_params:
            _log_info("Using passed-in 'local_listen_port' for all workers")
            unique_hosts = set(urlparse(a).hostname for a in worker_addresses)
            if len(unique_hosts) < len(worker_addresses):
                msg = (
                    "'local_listen_port' was provided in Dask training parameters, but at least one "
                    "machine in the cluster has multiple Dask worker processes running on it. Please omit "
                    "'local_listen_port' or pass 'machines'."
                )
                raise LightGBMError(msg)

            worker_address_to_port = {
                address: local_listen_port
                for address in worker_addresses
            }
        else:
            _log_info("Finding random open ports for workers")
            host_to_workers = _group_workers_by_host(worker_map.keys())
            worker_address_to_port = _assign_open_ports_to_workers(client, host_to_workers)

        machines = ','.join([
            f'{urlparse(worker_address).hostname}:{port}'
            for worker_address, port
            in worker_address_to_port.items()
        ])

    num_machines = len(worker_address_to_port)

    # Tell each worker to train on the parts that it has locally
    #
    # This code treats ``_train_part()`` calls as not "pure" because:
    #     1. there is randomness in the training process unless parameters ``seed``
    #        and ``deterministic`` are set
    #     2. even with those parameters set, the output of one ``_train_part()`` call
    #        relies on global state (it and all the other LightGBM training processes
    #        coordinate with each other)
    futures_classifiers = [
        client.submit(
            _train_part,
            model_factory=model_factory,
            params={**params, 'num_threads': worker_ncores[worker]},
            list_of_parts=list_of_parts,
            machines=machines,
            local_listen_port=worker_address_to_port[worker],
            num_machines=num_machines,
            time_out=params.get('time_out', 120),
            return_model=(worker == master_worker),
            workers=[worker],
            allow_other_workers=False,
            pure=False,
            **kwargs
        )
        for worker, list_of_parts in worker_map.items()
    ]

    results = client.gather(futures_classifiers)
    results = [v for v in results if v]
    model = results[0]

    # if network parameters were changed during training, remove them from the
    # returned model so that they're generated dynamically on every run based
    # on the Dask cluster you're connected to and which workers have pieces of
    # the training data
    if not listen_port_in_params:
        for param in _ConfigAliases.get('local_listen_port'):
            model._other_params.pop(param, None)

    if not machines_in_params:
        for param in _ConfigAliases.get('machines'):
            model._other_params.pop(param, None)

    for param in _ConfigAliases.get('num_machines', 'timeout'):
        model._other_params.pop(param, None)

    return model


def _predict_part(
    part: _DaskPart,
    model: LGBMModel,
    raw_score: bool,
    pred_proba: bool,
    pred_leaf: bool,
    pred_contrib: bool,
    **kwargs: Any
) -> _DaskPart:

    if part.shape[0] == 0:
        result = np.array([])
    elif pred_proba:
        result = model.predict_proba(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )
    else:
        result = model.predict(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )

    # dask.DataFrame.map_partitions() expects each call to return a pandas DataFrame or Series
    if isinstance(part, pd_DataFrame):
        if len(result.shape) == 2:
            result = pd_DataFrame(result, index=part.index)
        else:
            result = pd_Series(result, index=part.index, name='predictions')

    return result


def _predict(
    model: LGBMModel,
    data: _DaskMatrixLike,
    client: Client,
    raw_score: bool = False,
    pred_proba: bool = False,
    pred_leaf: bool = False,
    pred_contrib: bool = False,
    dtype: _PredictionDtype = np.float32,
    **kwargs: Any
) -> Union[dask_Array, List[dask_Array]]:
    """Inner predict routine.

    Parameters
    ----------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Fitted underlying model.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    pred_proba : bool, optional (default=False)
        Should method return results of ``predict_proba`` (``pred_proba=True``) or ``predict`` (``pred_proba=False``).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.
    dtype : np.dtype, optional (default=np.float32)
        Dtype of the output.
    **kwargs
        Other parameters passed to ``predict`` or ``predict_proba`` method.

    Returns
    -------
    predicted_result : Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]
        The predicted values.
    X_leaves : Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]
        If ``pred_contrib=True``, the feature contributions for each sample.
    """
    if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
        raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')
    if isinstance(data, dask_DataFrame):
        return data.map_partitions(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        ).values
    elif isinstance(data, dask_Array):
        # for multi-class classification with sparse matrices, pred_contrib predictions
        # are returned as a list of sparse matrices (one per class)
        num_classes = model._n_classes or -1

        if (
            num_classes > 2
            and pred_contrib
            and isinstance(data._meta, ss.spmatrix)
        ):

            predict_function = partial(
                _predict_part,
                model=model,
                raw_score=False,
                pred_proba=pred_proba,
                pred_leaf=False,
                pred_contrib=True,
                **kwargs
            )

            delayed_chunks = data.to_delayed()
            bag = dask_bag_from_delayed(delayed_chunks[:, 0])

            @delayed
            def _extract(items: List[Any], i: int) -> Any:
                return items[i]

            preds = bag.map_partitions(predict_function)

            # pred_contrib output will have one column per feature,
            # plus one more for the base value
            num_cols = model.n_features_ + 1

            nrows_per_chunk = data.chunks[0]
            out: List[List[dask_Array]] = [[] for _ in range(num_classes)]

            # need to tell Dask the expected type and shape of individual preds
            pred_meta = data._meta

            for j, partition in enumerate(preds.to_delayed()):
                for i in range(num_classes):
                    part = dask_array_from_delayed(
                        value=_extract(partition, i),
                        shape=(nrows_per_chunk[j], num_cols),
                        meta=pred_meta
                    )
                    out[i].append(part)

            # by default, dask.array.concatenate() concatenates sparse arrays into a COO matrix
            # the code below is used instead to ensure that the sparse type is preserved during concatenation
            if isinstance(pred_meta, ss.csr_matrix):
                concat_fn = partial(ss.vstack, format='csr')
            elif isinstance(pred_meta, ss.csc_matrix):
                concat_fn = partial(ss.vstack, format='csc')
            else:
                concat_fn = ss.vstack

            # At this point, `out` is a list of lists of delayeds (each of which points to a matrix).
            # Concatenate them to return a list of Dask Arrays.
            out_arrays: List[dask_Array] = []
            for i in range(num_classes):
                out_arrays.append(
                    dask_array_from_delayed(
                        value=delayed(concat_fn)(out[i]),
                        shape=(data.shape[0], num_cols),
                        meta=pred_meta
                    )
                )

            return out_arrays

        data_row = client.compute(data[[0]]).result()
        predict_fn = partial(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs,
        )
        pred_row = predict_fn(data_row)
        chunks = (data.chunks[0],)
        map_blocks_kwargs = {}
        if len(pred_row.shape) > 1:
            chunks += (pred_row.shape[1],)
        else:
            map_blocks_kwargs['drop_axis'] = 1
        return data.map_blocks(
            predict_fn,
            chunks=chunks,
            meta=pred_row,
            dtype=dtype,
            **map_blocks_kwargs,
        )
    else:
        raise TypeError(f'Data must be either Dask Array or Dask DataFrame. Got {type(data).__name__}.')


class _DaskLGBMModel:

    @property
    def client_(self) -> Client:
        """:obj:`dask.distributed.Client`: Dask client.

        This property can be passed in the constructor or updated
        with ``model.set_params(client=client)``.
        """
        if not getattr(self, "fitted_", False):
            raise LGBMNotFittedError('Cannot access property client_ before calling fit().')

        return _get_dask_client(client=self.client)

    def _lgb_dask_getstate(self) -> Dict[Any, Any]:
        """Remove un-picklable attributes before serialization."""
        client = self.__dict__.pop("client", None)
        self._other_params.pop("client", None)
        out = deepcopy(self.__dict__)
        out.update({"client": None})
        self.client = client
        return out

    def _lgb_dask_fit(
        self,
        model_factory: Type[LGBMModel],
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[Union[_LGBM_ScikitCustomEvalFunction, str, List[Union[_LGBM_ScikitCustomEvalFunction, str]]]] = None,
        eval_at: Optional[Iterable[int]] = None,
        **kwargs: Any
    ) -> "_DaskLGBMModel":
        if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
            raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')

        params = self.get_params(True)
        params.pop("client", None)

        model = _train(
            client=_get_dask_client(self.client),
            data=X,
            label=y,
            params=params,
            model_factory=model_factory,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs
        )

        self.set_params(**model.get_params())
        self._lgb_dask_copy_extra_params(model, self)

        return self

    def _lgb_dask_to_local(self, model_factory: Type[LGBMModel]) -> LGBMModel:
        params = self.get_params()
        params.pop("client", None)
        model = model_factory(**params)
        self._lgb_dask_copy_extra_params(self, model)
        model._other_params.pop("client", None)
        return model

    @staticmethod
    def _lgb_dask_copy_extra_params(source: Union["_DaskLGBMModel", LGBMModel], dest: Union["_DaskLGBMModel", LGBMModel]) -> None:
        params = source.get_params()
        attributes = source.__dict__
        extra_param_names = set(attributes.keys()).difference(params.keys())
        for name in extra_param_names:
            setattr(dest, name, attributes[name])


class DaskLGBMClassifier(LGBMClassifier, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMClassifier."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: int = -1,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMClassifier.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMClassifier.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskCollection] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_class_weight: Optional[List[Union[dict, str]]] = None,
        eval_init_score: Optional[List[_DaskCollection]] = None,
        eval_metric: Optional[Union[_LGBM_ScikitCustomEvalFunction, str, List[Union[_LGBM_ScikitCustomEvalFunction, str]]]] = None,
        **kwargs: Any
    ) -> "DaskLGBMClassifier":
        """Docstring is inherited from the lightgbm.LGBMClassifier.fit."""
        return self._lgb_dask_fit(
            model_factory=LGBMClassifier,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_class_weight=eval_class_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs
        )

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task), or Dask Array or Dask DataFrame of shape = [n_samples, n_classes] (for multi-class task), or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array, Dask Series or Dask DataFrame (for multi-class task), or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMClassifier does not support group, eval_group.
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('eval_set :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_group :')]
                 + _base_doc[_base_doc.find('eval_metric :'):])

    # DaskLGBMClassifier support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMClassifier.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMClassifier
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            dtype=self.classes_.dtype,
            client=_get_dask_client(self.client),
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]"
    )

    def predict_proba(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict_proba."""
        return _predict(
            model=self.to_local(),
            data=X,
            pred_proba=True,
            client=_get_dask_client(self.client),
            **kwargs
        )

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes] or (if multi-class and using sparse inputs) a list of ``n_classes`` Dask Arrays of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMClassifier:
        """Create regular version of lightgbm.LGBMClassifier from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMClassifier
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMClassifier)


class DaskLGBMRegressor(LGBMRegressor, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRegressor."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: int = -1,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRegressor.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMRegressor.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[Union[_LGBM_ScikitCustomEvalFunction, str, List[Union[_LGBM_ScikitCustomEvalFunction, str]]]] = None,
        **kwargs: Any
    ) -> "DaskLGBMRegressor":
        """Docstring is inherited from the lightgbm.LGBMRegressor.fit."""
        return self._lgb_dask_fit(
            model_factory=LGBMRegressor,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_metric=eval_metric,
            **kwargs
        )

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMRegressor does not support group, eval_class_weight, eval_group.
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('eval_set :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])

    _base_doc = (_base_doc[:_base_doc.find('eval_group :')]
                 + _base_doc[_base_doc.find('eval_metric :'):])

    # DaskLGBMRegressor support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRegressor.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRegressor
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRegressor.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )
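
    # ``predict`` returns a lazy Dask Array; no computation happens until the
    # result is materialized. A sketch (``reg`` is a fitted DaskLGBMRegressor):
    #
    #   preds = reg.predict(dX)  # lazy
    #   preds.compute()          # triggers distributed prediction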

    def to_local(self) -> LGBMRegressor:
        """Create regular version of lightgbm.LGBMRegressor from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRegressor
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRegressor)
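
# Example usage of DaskLGBMRegressor (a sketch; reuses the illustrative
# ``client``, ``dX`` and ``dy`` from the classifier example above, with ``dy``
# now a continuous target):
#
#   reg = lgb.DaskLGBMRegressor(client=client, n_estimators=100, num_leaves=63)
#   reg.fit(dX, dy)
#   preds = reg.predict(dX).compute()
#   local_reg = reg.to_local()  # plain LGBMRegressor for single-machine scoring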


class DaskLGBMRanker(LGBMRanker, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRanker."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[str, _LGBM_ScikitCustomObjectiveFunction]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: int = -1,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRanker.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMRanker.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')  # type: ignore
    __init__.__doc__ = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        group: Optional[_DaskVectorLike] = None,
        eval_set: Optional[List[Tuple[_DaskMatrixLike, _DaskCollection]]] = None,
        eval_names: Optional[List[str]] = None,
        eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
        eval_init_score: Optional[List[_DaskVectorLike]] = None,
        eval_group: Optional[List[_DaskVectorLike]] = None,
        eval_metric: Optional[Union[_LGBM_ScikitCustomEvalFunction, str, List[Union[_LGBM_ScikitCustomEvalFunction, str]]]] = None,
        eval_at: Iterable[int] = (1, 2, 3, 4, 5),
        **kwargs: Any
    ) -> "DaskLGBMRanker":
        """Docstring is inherited from the lightgbm.LGBMRanker.fit."""
        return self._lgb_dask_fit(
            model_factory=LGBMRanker,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            eval_set=eval_set,
            eval_names=eval_names,
            eval_sample_weight=eval_sample_weight,
            eval_init_score=eval_init_score,
            eval_group=eval_group,
            eval_metric=eval_metric,
            eval_at=eval_at,
            **kwargs
        )

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)",
        eval_sample_weight_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_init_score_shape="list of Dask Array or Dask Series, or None, optional (default=None)",
        eval_group_shape="list of Dask Array or Dask Series, or None, optional (default=None)"
    )

    # DaskLGBMRanker does not support eval_class_weight or early stopping
    _base_doc = (_base_doc[:_base_doc.find('eval_class_weight :')]
                 + _base_doc[_base_doc.find('eval_init_score :'):])

    _base_doc = (_base_doc[:_base_doc.find('feature_name :')]
                 + "eval_at : iterable of int, optional (default=(1, 2, 3, 4, 5))\n"
                 + f"{' ':8}The evaluation positions of the specified metric.\n"
                 + f"{' ':4}{_base_doc[_base_doc.find('feature_name :'):]}")

    # DaskLGBMRanker support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRanker.fit()``.

    Returns
    -------
    self : lightgbm.DaskLGBMRanker
        Returns self.

    {_lgbmmodel_doc_custom_eval_note}
        """

    def predict(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRanker.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            client=_get_dask_client(self.client),
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMRanker:
        """Create regular version of lightgbm.LGBMRanker from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRanker
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRanker)
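
# Example usage of DaskLGBMRanker (a sketch; assumes rows are already sorted
# by query so that each partition holds only whole queries, and reuses the
# illustrative ``client`` from the examples above):
#
#   import numpy as np
#   import dask.array as da
#   import lightgbm as lgb
#
#   # 200 rows in two partitions; each partition holds two queries of 50 rows
#   dX = da.random.random((200, 10), chunks=(100, 10))
#   dy = da.random.randint(0, 4, size=200, chunks=100)
#   dg = da.from_array(np.array([50, 50, 50, 50]), chunks=2)
#
#   rnk = lgb.DaskLGBMRanker(client=client, n_estimators=50)
#   rnk.fit(dX, dy, group=dg, eval_at=(1, 3, 5))
#   scores = rnk.predict(dX).compute()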