# coding: utf-8
"""Distributed training with LightGBM and dask.distributed.

This module enables you to perform distributed training with LightGBM on
dask.Array and dask.DataFrame collections.

It is based on dask-lightgbm, which was based on dask-xgboost.
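
The example below is an illustrative sketch (not part of the library's tested examples);
it assumes a local Dask cluster can be created on the current machine::

    import dask.array as da
    import lightgbm as lgb
    from distributed import Client, LocalCluster

    cluster = LocalCluster(n_workers=2)
    client = Client(cluster)

    X = da.random.random((1000, 10), chunks=(100, 10))
    y = da.random.random((1000,), chunks=(100,))

    model = lgb.DaskLGBMRegressor(n_estimators=10)
    model.fit(X, y)
    preds = model.predict(X)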
"""
import socket
from collections import defaultdict
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Type, Union
from urllib.parse import urlparse

import numpy as np
import scipy.sparse as ss

from .basic import _LIB, LightGBMError, _choose_param_value, _ConfigAliases, _log_info, _log_warning, _safe_call
from .compat import (DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED, Client, LGBMNotFittedError, concat,
                     dask_Array, dask_DataFrame, dask_Series, default_client, delayed, pd_DataFrame, pd_Series, wait)
from .sklearn import LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor, _lgbmmodel_doc_fit, _lgbmmodel_doc_predict

_DaskCollection = Union[dask_Array, dask_DataFrame, dask_Series]
_DaskMatrixLike = Union[dask_Array, dask_DataFrame]
_DaskVectorLike = Union[dask_Array, dask_Series]
_DaskPart = Union[np.ndarray, pd_DataFrame, pd_Series, ss.spmatrix]
_PredictionDtype = Union[Type[np.float32], Type[np.float64], Type[np.int32], Type[np.int64]]


def _get_dask_client(client: Optional[Client]) -> Client:
    """Choose a Dask client to use.

    Parameters
    ----------
    client : dask.distributed.Client or None
        Dask client.

    Returns
    -------
    client : dask.distributed.Client
        A Dask client.
    """
    if client is None:
        return default_client()
    else:
        return client


def _find_random_open_port() -> int:
    """Find a random open port on localhost.

    Returns
    -------
    port : int
        A free port on localhost.
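
    Examples
    --------
    An illustrative run; the exact port returned will vary:

    >>> _find_random_open_port()  # doctest: +SKIP
    58812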
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('', 0))
        port = s.getsockname()[1]
    return port


def _concat(seq: List[_DaskPart]) -> _DaskPart:
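    """Concatenate a list of local data pieces (numpy arrays, pandas objects, or scipy sparse matrices) into one object."""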
    if isinstance(seq[0], np.ndarray):
        return np.concatenate(seq, axis=0)
    elif isinstance(seq[0], (pd_DataFrame, pd_Series)):
        return concat(seq, axis=0)
    elif isinstance(seq[0], ss.spmatrix):
        return ss.vstack(seq, format='csr')
    else:
        raise TypeError(f'Data must be one of: numpy arrays, pandas dataframes, sparse matrices (from scipy). Got {type(seq[0])}.')


def _train_part(
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    list_of_parts: List[Dict[str, _DaskPart]],
    machines: str,
    local_listen_port: int,
    num_machines: int,
    return_model: bool,
    time_out: int = 120,
    **kwargs: Any
) -> Optional[LGBMModel]:
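    """Train a LightGBM model on one Dask worker, using only the data parts local to that worker."""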
    network_params = {
        'machines': machines,
        'local_listen_port': local_listen_port,
        'time_out': time_out,
        'num_machines': num_machines
    }
    params.update(network_params)

    is_ranker = issubclass(model_factory, LGBMRanker)

    # Concatenate many parts into one
    data = _concat([x['data'] for x in list_of_parts])
    label = _concat([x['label'] for x in list_of_parts])

    if 'weight' in list_of_parts[0]:
        weight = _concat([x['weight'] for x in list_of_parts])
    else:
        weight = None

    if 'group' in list_of_parts[0]:
        group = _concat([x['group'] for x in list_of_parts])
    else:
        group = None

    if 'init_score' in list_of_parts[0]:
        init_score = _concat([x['init_score'] for x in list_of_parts])
    else:
        init_score = None

    try:
        model = model_factory(**params)
        if is_ranker:
            model.fit(data, label, sample_weight=weight, init_score=init_score, group=group, **kwargs)
        else:
            model.fit(data, label, sample_weight=weight, init_score=init_score, **kwargs)

    finally:
        _safe_call(_LIB.LGBM_NetworkFree())

    return model if return_model else None


def _split_to_parts(data: _DaskCollection, is_matrix: bool) -> List[_DaskPart]:
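    """Split a Dask collection into a list of delayed objects, one per partition or block."""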
    parts = data.to_delayed()
    if isinstance(parts, np.ndarray):
        if is_matrix:
            assert parts.shape[1] == 1
        else:
            assert parts.ndim == 1 or parts.shape[1] == 1
        parts = parts.flatten().tolist()
    return parts


def _machines_to_worker_map(machines: str, worker_addresses: List[str]) -> Dict[str, int]:
    """Create a worker_map from machines list.

    Given ``machines`` and a list of Dask worker addresses, return a mapping where the keys are
    ``worker_addresses`` and the values are ports from ``machines``.

    Parameters
    ----------
    machines : str
        A comma-delimited list of workers, of the form ``ip1:port,ip2:port``.
    worker_addresses : list of str
        A list of Dask worker addresses, of the form ``{protocol}{hostname}:{port}``, where ``port`` is the port Dask's scheduler uses to talk to that worker.

    Returns
    -------
    result : Dict[str, int]
        Dictionary where keys are worker addresses in the form expected by Dask and values are a port for LightGBM to use.
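
    Examples
    --------
    An illustrative call; the addresses below are made up.

    >>> _machines_to_worker_map(  # doctest: +SKIP
    ...     machines="10.0.0.1:12400,10.0.0.2:12400",
    ...     worker_addresses=["tcp://10.0.0.1:36857", "tcp://10.0.0.2:43517"]
    ... )
    {'tcp://10.0.0.1:36857': 12400, 'tcp://10.0.0.2:43517': 12400}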
    """
    machine_addresses = machines.split(",")

    if len(set(machine_addresses)) != len(machine_addresses):
        raise ValueError(f"Found duplicates in 'machines' ({machines}). Each entry in 'machines' must be a unique IP-port combination.")

    machine_to_port = defaultdict(set)
    for address in machine_addresses:
        host, port = address.split(":")
        machine_to_port[host].add(int(port))

    out = {}
    for address in worker_addresses:
        worker_host = urlparse(address).hostname
        out[address] = machine_to_port[worker_host].pop()

    return out


def _possibly_fix_worker_map_duplicates(worker_map: Dict[str, int], client: Client) -> Dict[str, int]:
    """Fix any duplicate IP-port pairs in a ``worker_map``."""
    worker_map = deepcopy(worker_map)
    workers_that_need_new_ports = []
    host_to_port = defaultdict(set)
    for worker, port in worker_map.items():
        host = urlparse(worker).hostname
        if port in host_to_port[host]:
            workers_that_need_new_ports.append(worker)
        else:
            host_to_port[host].add(port)

    # if any duplicates were found, search for new ports one by one
    for worker in workers_that_need_new_ports:
        _log_info(f"Searching for a LightGBM training port for worker '{worker}'")
        host = urlparse(worker).hostname
        for _ in range(100):
            new_port = client.submit(
                _find_random_open_port,
                workers=[worker],
                allow_other_workers=False,
                pure=False
            ).result()
            if new_port not in host_to_port[host]:
                worker_map[worker] = new_port
                host_to_port[host].add(new_port)
                break
        else:
            # for/else: this branch runs only if no ``break`` happened, i.e. all
            # retries were exhausted without finding a free, unused port
            raise LightGBMError(
                "Failed to find an open port. Try re-running training or explicitly setting 'machines' or 'local_listen_port'."
            )

    return worker_map


def _train(
    client: Client,
    data: _DaskMatrixLike,
    label: _DaskCollection,
    params: Dict[str, Any],
    model_factory: Type[LGBMModel],
    sample_weight: Optional[_DaskVectorLike] = None,
    init_score: Optional[_DaskVectorLike] = None,
    group: Optional[_DaskVectorLike] = None,
    **kwargs: Any
) -> LGBMModel:
    """Inner train routine.

    Parameters
    ----------
    client : dask.distributed.Client
        Dask client.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    label : Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]
        The target values (class labels in classification, real numbers in regression).
    params : dict
        Parameters passed to constructor of the local underlying model.
    model_factory : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Class of the local underlying model.
    sample_weight : Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)
        Weights of training data.
    init_score : Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)
        Init score of training data.
    group : Dask Array or Dask Series or None, optional (default=None)
        Group/query data.
        Only used in the learning-to-rank task.
        sum(group) = n_samples.
        For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
        where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
    **kwargs
        Other parameters passed to ``fit`` method of the local underlying model.

    Returns
    -------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Returns fitted underlying model.

    Note
    ----

    This method handles setting up the following network parameters based on information
    about the Dask cluster referenced by ``client``.

    * ``local_listen_port``: port that each LightGBM worker opens a listening socket on,
            to accept connections from other workers. This can differ from LightGBM worker
            to LightGBM worker, but does not have to.
    * ``machines``: a comma-delimited list of all workers in the cluster, in the
            form ``ip:port,ip:port``. If running multiple Dask workers on the same host, use different
            ports for each worker. For example, for ``LocalCluster(n_workers=3)``, you might
            pass ``"127.0.0.1:12400,127.0.0.1:12401,127.0.0.1:12402"``.
    * ``num_machines``: number of LightGBM workers.
    * ``timeout``: time in minutes to wait before closing unused sockets.

    The default behavior of this function is to generate ``machines`` from the list of
    Dask workers which hold some piece of the training data, and to search for an open
    port on each worker to be used as ``local_listen_port``.

    If ``machines`` is provided explicitly in ``params``, this function uses the hosts
    and ports in that list directly, and does not do any searching. This means that if
    any of the Dask workers are missing from the list or any of those ports are not free
    when training starts, training will fail.

    If ``local_listen_port`` is provided in ``params`` and ``machines`` is not, this function
    constructs ``machines`` from the list of Dask workers which hold some piece of the
    training data, assuming that each one will use the same ``local_listen_port``.
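
    As an illustrative sketch (``client``, ``X``, and ``y`` are assumed to already
    exist, and the addresses below are made up), passing ``machines`` explicitly
    skips the port search entirely:

    >>> model = _train(  # doctest: +SKIP
    ...     client=client,
    ...     data=X,
    ...     label=y,
    ...     params={"machines": "10.0.0.1:12400,10.0.0.2:12400"},
    ...     model_factory=LGBMRegressor
    ... )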
    """
    params = deepcopy(params)

    # capture whether local_listen_port or its aliases were provided
    listen_port_in_params = any(
        alias in params for alias in _ConfigAliases.get("local_listen_port")
    )

    # capture whether machines or its aliases were provided
    machines_in_params = any(
        alias in params for alias in _ConfigAliases.get("machines")
    )

    params = _choose_param_value(
        main_param_name="tree_learner",
        params=params,
        default_value="data"
    )
    allowed_tree_learners = {
        'data',
        'data_parallel',
        'feature',
        'feature_parallel',
        'voting',
        'voting_parallel'
    }
    if params["tree_learner"] not in allowed_tree_learners:
        _log_warning(f'Parameter tree_learner set to {params["tree_learner"]}, which is not allowed. Using "data" as default')
        params['tree_learner'] = 'data'

    # Some passed-in parameters can be removed:
    #   * 'num_machines': set automatically from Dask worker list
    #   * 'num_threads': overridden to match nthreads on each Dask process
    for param_alias in _ConfigAliases.get('num_machines', 'num_threads'):
        if param_alias in params:
            _log_warning(f"Parameter {param_alias} will be ignored.")
            params.pop(param_alias)

    # Split arrays/dataframes into parts. Arrange parts into dicts to enforce co-locality
    data_parts = _split_to_parts(data=data, is_matrix=True)
    label_parts = _split_to_parts(data=label, is_matrix=False)
    parts = [{'data': x, 'label': y} for (x, y) in zip(data_parts, label_parts)]
    n_parts = len(parts)

    if sample_weight is not None:
        weight_parts = _split_to_parts(data=sample_weight, is_matrix=False)
        for i in range(n_parts):
            parts[i]['weight'] = weight_parts[i]

    if group is not None:
        group_parts = _split_to_parts(data=group, is_matrix=False)
        for i in range(n_parts):
            parts[i]['group'] = group_parts[i]

    if init_score is not None:
        init_score_parts = _split_to_parts(data=init_score, is_matrix=False)
        for i in range(n_parts):
            parts[i]['init_score'] = init_score_parts[i]

    # Start computation in the background
    parts = list(map(delayed, parts))
    parts = client.compute(parts)
    wait(parts)

    for part in parts:
        if part.status == 'error':  # type: ignore
            return part  # trigger error locally

    # Find locations of all parts and map them to particular Dask workers
    key_to_part_dict = {part.key: part for part in parts}  # type: ignore
    who_has = client.who_has(parts)
    worker_map = defaultdict(list)
    for key, workers in who_has.items():
        worker_map[next(iter(workers))].append(key_to_part_dict[key])

    master_worker = next(iter(worker_map))
    worker_ncores = client.ncores()

    # resolve aliases for network parameters and pop the result off params.
    # these values are added back in calls to `_train_part()`
    params = _choose_param_value(
        main_param_name="local_listen_port",
        params=params,
        default_value=12400
    )
    local_listen_port = params.pop("local_listen_port")

    params = _choose_param_value(
        main_param_name="machines",
        params=params,
        default_value=None
    )
    machines = params.pop("machines")

    # figure out network params
    worker_addresses = worker_map.keys()
    if machines is not None:
        _log_info("Using passed-in 'machines' parameter")
        worker_address_to_port = _machines_to_worker_map(
            machines=machines,
            worker_addresses=worker_addresses
        )
    else:
        if listen_port_in_params:
            _log_info("Using passed-in 'local_listen_port' for all workers")
            unique_hosts = set(urlparse(a).hostname for a in worker_addresses)
            if len(unique_hosts) < len(worker_addresses):
                msg = (
                    "'local_listen_port' was provided in Dask training parameters, but at least one "
                    "machine in the cluster has multiple Dask worker processes running on it. Please omit "
                    "'local_listen_port' or pass 'machines'."
                )
                raise LightGBMError(msg)

            worker_address_to_port = {
                address: local_listen_port
                for address in worker_addresses
            }
        else:
            _log_info("Finding random open ports for workers")
            # this approach with client.run() is faster than searching for ports
            # serially, but can produce duplicates sometimes. Try the fast approach one
            # time, then pass it through a function that will use a slower but more reliable
            # approach if duplicates are found.
            worker_address_to_port = client.run(
                _find_random_open_port,
                workers=list(worker_addresses)
            )
            worker_address_to_port = _possibly_fix_worker_map_duplicates(
                worker_map=worker_address_to_port,
                client=client
            )

        machines = ','.join([
            f'{urlparse(worker_address).hostname}:{port}'
            for worker_address, port
            in worker_address_to_port.items()
        ])

    num_machines = len(worker_address_to_port)

    # Tell each worker to train on the parts that it has locally
    #
    # This code treats ``_train_part()`` calls as not "pure" because:
    #     1. there is randomness in the training process unless parameters ``seed``
    #        and ``deterministic`` are set
    #     2. even with those parameters set, the output of one ``_train_part()`` call
    #        relies on global state (it and all the other LightGBM training processes
    #        coordinate with each other)
    futures_classifiers = [
        client.submit(
            _train_part,
            model_factory=model_factory,
            params={**params, 'num_threads': worker_ncores[worker]},
            list_of_parts=list_of_parts,
            machines=machines,
            local_listen_port=worker_address_to_port[worker],
            num_machines=num_machines,
            time_out=params.get('time_out', 120),
            return_model=(worker == master_worker),
            workers=[worker],
            allow_other_workers=False,
            pure=False,
            **kwargs
        )
        for worker, list_of_parts in worker_map.items()
    ]

    results = client.gather(futures_classifiers)
    results = [v for v in results if v]
    model = results[0]

    # if network parameters were changed during training, remove them from the
    # returned model so that they're generated dynamically on every run based
    # on the Dask cluster you're connected to and which workers have pieces of
    # the training data
    if not listen_port_in_params:
        for param in _ConfigAliases.get('local_listen_port'):
            model._other_params.pop(param, None)

    if not machines_in_params:
        for param in _ConfigAliases.get('machines'):
            model._other_params.pop(param, None)

    for param in _ConfigAliases.get('num_machines', 'timeout'):
        model._other_params.pop(param, None)

    return model


def _predict_part(
    part: _DaskPart,
    model: LGBMModel,
    raw_score: bool,
    pred_proba: bool,
    pred_leaf: bool,
    pred_contrib: bool,
    **kwargs: Any
) -> _DaskPart:
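    """Run prediction on a single local partition of the data."""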

    if part.shape[0] == 0:
        result = np.array([])
    elif pred_proba:
        result = model.predict_proba(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )
    else:
        result = model.predict(
            part,
            raw_score=raw_score,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        )

    # dask.DataFrame.map_partitions() expects each call to return a pandas DataFrame or Series
    if isinstance(part, pd_DataFrame):
        if len(result.shape) == 2:
            result = pd_DataFrame(result, index=part.index)
        else:
            result = pd_Series(result, index=part.index, name='predictions')

    return result


def _predict(
    model: LGBMModel,
    data: _DaskMatrixLike,
    raw_score: bool = False,
    pred_proba: bool = False,
    pred_leaf: bool = False,
    pred_contrib: bool = False,
    dtype: _PredictionDtype = np.float32,
    **kwargs: Any
) -> dask_Array:
    """Inner predict routine.

    Parameters
    ----------
    model : lightgbm.LGBMClassifier, lightgbm.LGBMRegressor, or lightgbm.LGBMRanker class
        Fitted underlying model.
    data : Dask Array or Dask DataFrame of shape = [n_samples, n_features]
        Input feature matrix.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    pred_proba : bool, optional (default=False)
        Should method return results of ``predict_proba`` (``pred_proba=True``) or ``predict`` (``pred_proba=False``).
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.
    dtype : np.dtype, optional (default=np.float32)
        Dtype of the output.
    **kwargs
        Other parameters passed to ``predict`` or ``predict_proba`` method.

    Returns
    -------
    predicted_result : Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]
        The predicted values.
    X_leaves : Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]
        If ``pred_leaf=True``, the predicted leaf of every tree for each sample.
    X_SHAP_values : Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes]
        If ``pred_contrib=True``, the feature contributions for each sample.
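
    Examples
    --------
    An illustrative sketch; assumes ``model`` is an already-fitted local model
    and ``X`` is a Dask Array:

    >>> preds = _predict(model=model, data=X)  # doctest: +SKIP
    >>> preds.compute()  # doctest: +SKIP
    array([...], dtype=float32)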
    """
    if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
        raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')
    if isinstance(data, dask_DataFrame):
        return data.map_partitions(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            **kwargs
        ).values
    elif isinstance(data, dask_Array):
        return data.map_blocks(
            _predict_part,
            model=model,
            raw_score=raw_score,
            pred_proba=pred_proba,
            pred_leaf=pred_leaf,
            pred_contrib=pred_contrib,
            dtype=dtype,
            drop_axis=1
        )
    else:
        raise TypeError(f'Data must be either Dask Array or Dask DataFrame. Got {type(data)}.')


class _DaskLGBMModel:

    @property
    def client_(self) -> Client:
        """:obj:`dask.distributed.Client`: Dask client.

        The client can be passed to the constructor or updated later
        with ``model.set_params(client=client)``.
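
        A minimal sketch (``client``, ``X``, and ``y`` are assumed to already exist):

        >>> model = DaskLGBMClassifier(client=client)  # doctest: +SKIP
        >>> model.fit(X, y)  # doctest: +SKIP
        >>> model.client_  # doctest: +SKIP
        <Client: ...>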
        """
        if not getattr(self, "fitted_", False):
            raise LGBMNotFittedError('Cannot access property client_ before calling fit().')

        return _get_dask_client(client=self.client)

    def _lgb_dask_getstate(self) -> Dict[Any, Any]:
        """Remove un-picklable attributes before serialization."""
        client = self.__dict__.pop("client", None)
        self._other_params.pop("client", None)
        out = deepcopy(self.__dict__)
        out.update({"client": None})
        self.client = client
        return out

    def _lgb_dask_fit(
        self,
        model_factory: Type[LGBMModel],
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        group: Optional[_DaskVectorLike] = None,
        **kwargs: Any
    ) -> "_DaskLGBMModel":
        if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
            raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')

        params = self.get_params(True)
        params.pop("client", None)

        model = _train(
            client=_get_dask_client(self.client),
            data=X,
            label=y,
            params=params,
            model_factory=model_factory,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            **kwargs
        )

        self.set_params(**model.get_params())
        self._lgb_dask_copy_extra_params(model, self)

        return self

    def _lgb_dask_to_local(self, model_factory: Type[LGBMModel]) -> LGBMModel:
        params = self.get_params()
        params.pop("client", None)
        model = model_factory(**params)
        self._lgb_dask_copy_extra_params(self, model)
        model._other_params.pop("client", None)
        return model

    @staticmethod
    def _lgb_dask_copy_extra_params(source: Union["_DaskLGBMModel", LGBMModel], dest: Union["_DaskLGBMModel", LGBMModel]) -> None:
        params = source.get_params()
        attributes = source.__dict__
        extra_param_names = set(attributes.keys()).difference(params.keys())
        for name in extra_param_names:
            setattr(dest, name, attributes[name])


class DaskLGBMClassifier(LGBMClassifier, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMClassifier."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[Callable, str]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: int = -1,
        silent: bool = True,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMClassifier.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            silent=silent,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMClassifier.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')
    _base_doc = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    # the note on custom objective functions in LGBMModel.__init__ is not
    # currently relevant for the Dask estimators
    __init__.__doc__ = _base_doc[:_base_doc.find('Note\n')]

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        **kwargs: Any
    ) -> "DaskLGBMClassifier":
        """Docstring is inherited from the lightgbm.LGBMClassifier.fit."""
        return self._lgb_dask_fit(
            model_factory=LGBMClassifier,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            **kwargs
        )

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)"
    )

    # DaskLGBMClassifier does not support evaluation data or early stopping
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('verbose :'):])

    # DaskLGBMClassifier support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMClassifier.fit()``.
        """

    def predict(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            dtype=self.classes_.dtype,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes]"
    )

    def predict_proba(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMClassifier.predict_proba."""
        return _predict(
            model=self.to_local(),
            data=X,
            pred_proba=True,
            **kwargs
        )

    predict_proba.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted probability for each class for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_probability",
        predicted_result_shape="Dask Array of shape = [n_samples] or shape = [n_samples, n_classes]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees] or shape = [n_samples, n_trees * n_classes]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1] or shape = [n_samples, (n_features + 1) * n_classes]"
    )

    def to_local(self) -> LGBMClassifier:
        """Create regular version of lightgbm.LGBMClassifier from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMClassifier
            Local underlying model.
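
        As an illustrative sketch (``dask_model`` stands for an already-fitted
        ``DaskLGBMClassifier``), the local model can be pickled with the standard library:

        >>> import pickle  # doctest: +SKIP
        >>> local_model = dask_model.to_local()  # doctest: +SKIP
        >>> with open("model.pkl", "wb") as f:  # doctest: +SKIP
        ...     pickle.dump(local_model, f)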
        """
        return self._lgb_dask_to_local(LGBMClassifier)


class DaskLGBMRegressor(LGBMRegressor, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRegressor."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[Callable, str]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: int = -1,
        silent: bool = True,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRegressor.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            silent=silent,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMRegressor.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')
    _base_doc = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """
    # the note on custom objective functions in LGBMModel.__init__ is not
    # currently relevant for the Dask estimators
    __init__.__doc__ = _base_doc[:_base_doc.find('Note\n')]

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        **kwargs: Any
    ) -> "DaskLGBMRegressor":
        """Docstring is inherited from the lightgbm.LGBMRegressor.fit."""
        return self._lgb_dask_fit(
            model_factory=LGBMRegressor,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            **kwargs
        )

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)"
    )

    # DaskLGBMRegressor does not support evaluation data or early stopping
    _base_doc = (_base_doc[:_base_doc.find('group :')]
                 + _base_doc[_base_doc.find('verbose :'):])

    # DaskLGBMRegressor support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRegressor.fit()``.
        """

    def predict(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRegressor.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMRegressor:
        """Create regular version of lightgbm.LGBMRegressor from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRegressor
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRegressor)


class DaskLGBMRanker(LGBMRanker, _DaskLGBMModel):
    """Distributed version of lightgbm.LGBMRanker."""

    def __init__(
        self,
        boosting_type: str = 'gbdt',
        num_leaves: int = 31,
        max_depth: int = -1,
        learning_rate: float = 0.1,
        n_estimators: int = 100,
        subsample_for_bin: int = 200000,
        objective: Optional[Union[Callable, str]] = None,
        class_weight: Optional[Union[dict, str]] = None,
        min_split_gain: float = 0.,
        min_child_weight: float = 1e-3,
        min_child_samples: int = 20,
        subsample: float = 1.,
        subsample_freq: int = 0,
        colsample_bytree: float = 1.,
        reg_alpha: float = 0.,
        reg_lambda: float = 0.,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
        n_jobs: int = -1,
        silent: bool = True,
        importance_type: str = 'split',
        client: Optional[Client] = None,
        **kwargs: Any
    ):
        """Docstring is inherited from the lightgbm.LGBMRanker.__init__."""
        self.client = client
        super().__init__(
            boosting_type=boosting_type,
            num_leaves=num_leaves,
            max_depth=max_depth,
            learning_rate=learning_rate,
            n_estimators=n_estimators,
            subsample_for_bin=subsample_for_bin,
            objective=objective,
            class_weight=class_weight,
            min_split_gain=min_split_gain,
            min_child_weight=min_child_weight,
            min_child_samples=min_child_samples,
            subsample=subsample,
            subsample_freq=subsample_freq,
            colsample_bytree=colsample_bytree,
            reg_alpha=reg_alpha,
            reg_lambda=reg_lambda,
            random_state=random_state,
            n_jobs=n_jobs,
            silent=silent,
            importance_type=importance_type,
            **kwargs
        )

    _base_doc = LGBMRanker.__init__.__doc__
    _before_kwargs, _kwargs, _after_kwargs = _base_doc.partition('**kwargs')
    _base_doc = f"""
        {_before_kwargs}client : dask.distributed.Client or None, optional (default=None)
        {' ':4}Dask client. If ``None``, ``distributed.default_client()`` will be used at runtime. The Dask client used by this class will not be saved if the model object is pickled.
        {_kwargs}{_after_kwargs}
        """

    # the note on custom objective functions in LGBMModel.__init__ is not
    # currently relevant for the Dask estimators
    __init__.__doc__ = _base_doc[:_base_doc.find('Note\n')]

    def __getstate__(self) -> Dict[Any, Any]:
        return self._lgb_dask_getstate()

    def fit(
        self,
        X: _DaskMatrixLike,
        y: _DaskCollection,
        sample_weight: Optional[_DaskVectorLike] = None,
        init_score: Optional[_DaskVectorLike] = None,
        group: Optional[_DaskVectorLike] = None,
        **kwargs: Any
    ) -> "DaskLGBMRanker":
        """Docstring is inherited from the lightgbm.LGBMRanker.fit."""
        return self._lgb_dask_fit(
            model_factory=LGBMRanker,
            X=X,
            y=y,
            sample_weight=sample_weight,
            init_score=init_score,
            group=group,
            **kwargs
        )

    _base_doc = _lgbmmodel_doc_fit.format(
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        y_shape="Dask Array, Dask DataFrame or Dask Series of shape = [n_samples]",
        sample_weight_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        init_score_shape="Dask Array or Dask Series of shape = [n_samples] or None, optional (default=None)",
        group_shape="Dask Array or Dask Series or None, optional (default=None)"
    )

    # DaskLGBMRanker does not support evaluation data or early stopping
    _base_doc = (_base_doc[:_base_doc.find('eval_set :')]
                 + _base_doc[_base_doc.find('verbose :'):])

    # DaskLGBMRanker support for callbacks and init_model is not tested
    fit.__doc__ = f"""{_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMRanker.fit()``.
        """

    def predict(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
        """Docstring is inherited from the lightgbm.LGBMRanker.predict."""
        return _predict(
            model=self.to_local(),
            data=X,
            **kwargs
        )

    predict.__doc__ = _lgbmmodel_doc_predict.format(
        description="Return the predicted value for each sample.",
        X_shape="Dask Array or Dask DataFrame of shape = [n_samples, n_features]",
        output_name="predicted_result",
        predicted_result_shape="Dask Array of shape = [n_samples]",
        X_leaves_shape="Dask Array of shape = [n_samples, n_trees]",
        X_SHAP_values_shape="Dask Array of shape = [n_samples, n_features + 1]"
    )

    def to_local(self) -> LGBMRanker:
        """Create regular version of lightgbm.LGBMRanker from the distributed version.

        Returns
        -------
        model : lightgbm.LGBMRanker
            Local underlying model.
        """
        return self._lgb_dask_to_local(LGBMRanker)