# coding: utf-8
# pylint: disable = invalid-name, C0111, C0301
# pylint: disable = R0912, R0913, R0914, W0105, W0201, W0212
"""Wrapper for C API of LightGBM."""
from __future__ import absolute_import

import copy
import ctypes
import os
import warnings
from tempfile import NamedTemporaryFile

import numpy as np
import scipy.sparse

from .compat import (PANDAS_INSTALLED, DataFrame, Series,
                     DataTable,
                     decode_string, string_type,
                     integer_types, numeric_types,
                     json, json_default_with_numpy,
                     range_, zip_)
from .libpath import find_lib_path


def _load_lib():
    """Load LightGBM library."""
    lib_path = find_lib_path()
    if len(lib_path) == 0:
        return None
    lib = ctypes.cdll.LoadLibrary(lib_path[0])
    lib.LGBM_GetLastError.restype = ctypes.c_char_p
    return lib


_LIB = _load_lib()


def _safe_call(ret):
    """Check the return value from C API call.

    Parameters
    ----------
    ret : int
        The return value from C API calls.
    """
    if ret != 0:
        raise LightGBMError(decode_string(_LIB.LGBM_GetLastError()))


def is_numeric(obj):
    """Check whether object is a number or not, include numpy number, etc."""
    try:
        float(obj)
        return True
    except (TypeError, ValueError):
        # TypeError: obj is not a string or a number
        # ValueError: invalid literal
        return False


def is_numpy_1d_array(data):
    """Check whether data is a numpy 1-D array."""
    return isinstance(data, np.ndarray) and len(data.shape) == 1


def is_1d_list(data):
    """Check whether data is a 1-D list."""
    return isinstance(data, list) and (not data or is_numeric(data[0]))


def list_to_1d_numpy(data, dtype=np.float32, name='list'):
    """Convert data to numpy 1-D array."""
    if is_numpy_1d_array(data):
        if data.dtype == dtype:
            return data
        else:
            return data.astype(dtype=dtype, copy=False)
    elif is_1d_list(data):
        return np.array(data, dtype=dtype, copy=False)
    elif isinstance(data, Series):
        return data.values.astype(dtype)
    else:
        raise TypeError("Wrong type({0}) for {1}.\n"
                        "It should be list, numpy 1-D array or pandas Series".format(type(data).__name__, name))


def cfloat32_array_to_numpy(cptr, length):
    """Convert a ctypes float pointer array to a numpy array."""
    if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
        return np.fromiter(cptr, dtype=np.float32, count=length)
    else:
        raise RuntimeError('Expected float pointer')
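
# A small usage sketch for this converter (the float64/int variants below work
# the same way); the buffer here is hypothetical, not part of the library:
#   buf = (ctypes.c_float * 3)(0.0, 0.5, 1.0)
#   arr = cfloat32_array_to_numpy(ctypes.cast(buf, ctypes.POINTER(ctypes.c_float)), 3)
# np.fromiter copies `length` elements out of the C buffer, so `arr` owns its
# own memory and stays valid after the buffer is freed.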


def cfloat64_array_to_numpy(cptr, length):
    """Convert a ctypes double pointer array to a numpy array."""
    if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
        return np.fromiter(cptr, dtype=np.float64, count=length)
    else:
        raise RuntimeError('Expected double pointer')


def cint32_array_to_numpy(cptr, length):
    """Convert a ctypes int pointer array to a numpy array."""
    if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
        return np.fromiter(cptr, dtype=np.int32, count=length)
    else:
        raise RuntimeError('Expected int pointer')


def cint8_array_to_numpy(cptr, length):
    """Convert a ctypes int8 pointer array to a numpy array."""
    if isinstance(cptr, ctypes.POINTER(ctypes.c_int8)):
        return np.fromiter(cptr, dtype=np.int8, count=length)
    else:
        raise RuntimeError('Expected int8 pointer')


def c_str(string):
    """Convert a Python string to C string."""
    return ctypes.c_char_p(string.encode('utf-8'))


def c_array(ctype, values):
    """Convert a Python array to C array."""
    return (ctype * len(values))(*values)


def param_dict_to_str(data):
    """Convert Python dictionary to string, which is passed to C API."""
    if data is None or not data:
        return ""
    pairs = []
    for key, val in data.items():
        if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
            pairs.append(str(key) + '=' + ','.join(map(str, val)))
        elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):
            pairs.append(str(key) + '=' + str(val))
        elif val is not None:
            raise TypeError('Unknown type of parameter: %s, got: %s'
                            % (key, type(val).__name__))
    return ' '.join(pairs)
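
# Illustrative example of the conversion above (parameter values are made up):
#   param_dict_to_str({'num_leaves': 31, 'metric': ['auc', 'l2']})
# returns 'num_leaves=31 metric=auc,l2': list-like values are joined with
# commas and key=value pairs are separated by single spaces, which is the
# format the C API parses.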


class _TempFile(object):
    def __enter__(self):
        with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
            self.name = f.name
        return self
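
    # Note: __enter__ above only borrows a unique path from NamedTemporaryFile
    # (delete=True closes and removes the file immediately); readlines() and
    # writelines() below reopen that path on demand.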

    def __exit__(self, exc_type, exc_val, exc_tb):
        if os.path.isfile(self.name):
            os.remove(self.name)

    def readlines(self):
        with open(self.name, "r+") as f:
            ret = f.readlines()
        return ret

    def writelines(self, lines):
        with open(self.name, "w+") as f:
            f.writelines(lines)


class LightGBMError(Exception):
    """Error thrown by LightGBM."""

    pass


MAX_INT32 = (1 << 31) - 1

"""Macro definition of data type in C API of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
C_API_DTYPE_INT8 = 4

"""Matrix is row major in Python"""
C_API_IS_ROW_MAJOR = 1

"""Macro definition of prediction type in C API of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
C_API_PREDICT_CONTRIB = 3

"""Data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
                     "weight": C_API_DTYPE_FLOAT32,
                     "init_score": C_API_DTYPE_FLOAT64,
                     "group": C_API_DTYPE_INT32,
                     "feature_penalty": C_API_DTYPE_FLOAT64,
                     "monotone_constraints": C_API_DTYPE_INT8}

PANDAS_DTYPE_MAPPER = {'int8': 'int', 'int16': 'int', 'int32': 'int',
                       'int64': 'int', 'uint8': 'int', 'uint16': 'int',
                       'uint32': 'int', 'uint64': 'int', 'bool': 'int',
                       'float16': 'float', 'float32': 'float', 'float64': 'float'}


def convert_from_sliced_object(data):
    """Fix the memory of multi-dimensional sliced object."""
    if data.base is not None and isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
        if not data.flags.c_contiguous:
            warnings.warn("Usage of np.ndarray subset (sliced data) is not recommended "
                          "because it will double the peak memory cost in LightGBM.")
            return np.copy(data)
    return data
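
# Example of the case handled above: for a 1-D numpy array `a`, the strided
# view `a[::2]` has `a` as its base and is not C-contiguous, so it is copied
# here; a contiguous slice such as `a[:5]` falls through and is returned as-is.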


def c_float_array(data):
    """Get pointer of float numpy array / list."""
    if is_1d_list(data):
        data = np.array(data, copy=False)
    if is_numpy_1d_array(data):
        data = convert_from_sliced_object(data)
        assert data.flags.c_contiguous
        if data.dtype == np.float32:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
            type_data = C_API_DTYPE_FLOAT32
        elif data.dtype == np.float64:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
            type_data = C_API_DTYPE_FLOAT64
        else:
            raise TypeError("Expected np.float32 or np.float64, got type({})"
                            .format(data.dtype))
    else:
        raise TypeError("Unknown type({})".format(type(data).__name__))
    return (ptr_data, type_data, data)  # also return `data` so the temporary copy is not freed prematurely
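
# A minimal sketch of how the returned triple is meant to be consumed (names
# are illustrative, not part of the API):
#   ptr, dtype_code, holder = c_float_array([0.5, 1.5])  # list becomes float64
#   # keep `holder` referenced for as long as C code may read through `ptr`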


def c_int_array(data):
    """Get pointer of int numpy array / list."""
    if is_1d_list(data):
        data = np.array(data, copy=False)
    if is_numpy_1d_array(data):
        data = convert_from_sliced_object(data)
        assert data.flags.c_contiguous
        if data.dtype == np.int32:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
            type_data = C_API_DTYPE_INT32
        elif data.dtype == np.int64:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
            type_data = C_API_DTYPE_INT64
        else:
            raise TypeError("Expected np.int32 or np.int64, got type({})"
                            .format(data.dtype))
    else:
        raise TypeError("Unknown type({})".format(type(data).__name__))
    return (ptr_data, type_data, data)  # also return `data` so the temporary copy is not freed prematurely


def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
    if isinstance(data, DataFrame):
        if len(data.shape) != 2 or data.shape[0] < 1:
            raise ValueError('Input data must be 2 dimensional and non-empty.')
        if feature_name == 'auto' or feature_name is None:
            data = data.rename(columns=str)
        cat_cols = list(data.select_dtypes(include=['category']).columns)
        cat_cols_not_ordered = [col for col in cat_cols if not data[col].cat.ordered]
        if pandas_categorical is None:  # train dataset
            pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
        else:
            if len(cat_cols) != len(pandas_categorical):
                raise ValueError('train and valid dataset categorical_feature do not match.')
            for col, category in zip_(cat_cols, pandas_categorical):
                if list(data[col].cat.categories) != list(category):
                    data[col] = data[col].cat.set_categories(category)
        if len(cat_cols):  # cat_cols is list
            data = data.copy()  # do not alter the original DataFrame
            data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan})
        if categorical_feature is not None:
            if feature_name is None:
                feature_name = list(data.columns)
            if categorical_feature == 'auto':  # use cat cols from DataFrame
                categorical_feature = cat_cols_not_ordered
            else:  # use cat cols specified by user
                categorical_feature = list(categorical_feature)
        if feature_name == 'auto':
            feature_name = list(data.columns)
        data_dtypes = data.dtypes
        if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes):
            bad_fields = [data.columns[i] for i, dtype in
                          enumerate(data_dtypes) if dtype.name not in PANDAS_DTYPE_MAPPER]
            raise ValueError("DataFrame.dtypes for data must be int, float or bool.\n"
                             "Did not expect the data types in the following fields: "
                             + ', '.join(bad_fields))
        data = data.values.astype('float')
    else:
        if feature_name == 'auto':
            feature_name = None
        if categorical_feature == 'auto':
            categorical_feature = None
    return data, feature_name, categorical_feature, pandas_categorical
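
# Sketch of the categorical handling above with a hypothetical column: a
# pandas category column with categories ['a', 'b'] is replaced by its integer
# codes (0 and 1), the missing-value code -1 becomes np.nan, and the whole
# frame is then cast to a float matrix before being handed to the C API.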


def _label_from_pandas(label):
    if isinstance(label, DataFrame):
        if len(label.columns) > 1:
            raise ValueError('DataFrame for label cannot have multiple columns')
        label_dtypes = label.dtypes
        if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in label_dtypes):
            raise ValueError('DataFrame.dtypes for label must be int, float or bool')
        label = label.values.astype('float').flatten()
    return label


def _dump_pandas_categorical(pandas_categorical, file_name=None):
    pandas_str = ('\npandas_categorical:'
                  + json.dumps(pandas_categorical, default=json_default_with_numpy)
                  + '\n')
    if file_name is not None:
        with open(file_name, 'a') as f:
            f.write(pandas_str)
    return pandas_str
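
# For instance, pandas_categorical=[['a', 'b']] serializes to the line
# 'pandas_categorical:[["a", "b"]]', appended after the model text so that
# _load_pandas_categorical below can recover it from the end of the file.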


def _load_pandas_categorical(file_name=None, model_str=None):
    pandas_key = 'pandas_categorical:'
    offset = -len(pandas_key)
    if file_name is not None:
        max_offset = -os.path.getsize(file_name)
        with open(file_name, 'rb') as f:
            while True:
                if offset < max_offset:
                    offset = max_offset
                f.seek(offset, os.SEEK_END)
                lines = f.readlines()
                if len(lines) >= 2:
                    break
                offset *= 2
        last_line = decode_string(lines[-1]).strip()
        if not last_line.startswith(pandas_key):
            last_line = decode_string(lines[-2]).strip()
    elif model_str is not None:
        idx = model_str.rfind('\n', 0, offset)
        last_line = model_str[idx:].strip()
    if last_line.startswith(pandas_key):
        return json.loads(last_line[len(pandas_key):])
    else:
        return None
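
# The file branch above reads the model file tail backwards: it seeks `offset`
# bytes back from the end and doubles that window until at least two lines are
# available, because the 'pandas_categorical:...' record sits on the last line
# (with a fallback to the line before it). This avoids reading a potentially
# large model file into memory just to fetch one trailing record.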


class _InnerPredictor(object):
    """_InnerPredictor of LightGBM.

    Not exposed to user.
    Used only for prediction, usually for continued training.

    Note
    ----
    Can be converted from Booster, but cannot be converted to Booster.
    """

    def __init__(self, model_file=None, booster_handle=None, pred_parameter=None):
        """Initialize the _InnerPredictor.

        Parameters
        ----------
        model_file : string or None, optional (default=None)
            Path to the model file.
        booster_handle : object or None, optional (default=None)
            Handle of Booster.
        pred_parameter : dict or None, optional (default=None)
            Other parameters for the prediction.
        """
        self.handle = ctypes.c_void_p()
        self.__is_manage_handle = True
        if model_file is not None:
            """Prediction task"""
            out_num_iterations = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
                c_str(model_file),
                ctypes.byref(out_num_iterations),
                ctypes.byref(self.handle)))
            out_num_class = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.num_class = out_num_class.value
            self.num_total_iteration = out_num_iterations.value
            self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
        elif booster_handle is not None:
            self.__is_manage_handle = False
            self.handle = booster_handle
            out_num_class = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.num_class = out_num_class.value
            out_num_iterations = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
                self.handle,
                ctypes.byref(out_num_iterations)))
            self.num_total_iteration = out_num_iterations.value
            self.pandas_categorical = None
        else:
            raise TypeError('Need model_file or booster_handle to create a predictor')

        pred_parameter = {} if pred_parameter is None else pred_parameter
        self.pred_parameter = param_dict_to_str(pred_parameter)

    def __del__(self):
        try:
            if self.__is_manage_handle:
                _safe_call(_LIB.LGBM_BoosterFree(self.handle))
        except AttributeError:
            pass

    def __getstate__(self):
        this = self.__dict__.copy()
        this.pop('handle', None)
        return this

    def predict(self, data, num_iteration=-1,
                raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False,
                is_reshape=True):
        """Predict logic.

        Parameters
        ----------
        data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
            Data source for prediction.
            When data type is string, it represents the path to txt file.
        num_iteration : int, optional (default=-1)
            Iteration used for prediction.
        raw_score : bool, optional (default=False)
            Whether to predict raw scores.
        pred_leaf : bool, optional (default=False)
            Whether to predict leaf index.
        pred_contrib : bool, optional (default=False)
            Whether to predict feature contributions.
        data_has_header : bool, optional (default=False)
            Whether data has header.
            Used only for txt data.
        is_reshape : bool, optional (default=True)
            Whether to reshape to (nrow, ncol).

        Returns
        -------
        result : numpy array
            Prediction result.
        """
        if isinstance(data, Dataset):
            raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
        data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
        predict_type = C_API_PREDICT_NORMAL
        if raw_score:
            predict_type = C_API_PREDICT_RAW_SCORE
        if pred_leaf:
            predict_type = C_API_PREDICT_LEAF_INDEX
        if pred_contrib:
            predict_type = C_API_PREDICT_CONTRIB
        int_data_has_header = 1 if data_has_header else 0
        if num_iteration > self.num_total_iteration:
            num_iteration = self.num_total_iteration

        if isinstance(data, string_type):
            with _TempFile() as f:
                _safe_call(_LIB.LGBM_BoosterPredictForFile(
                    self.handle,
                    c_str(data),
                    ctypes.c_int(int_data_has_header),
                    ctypes.c_int(predict_type),
                    ctypes.c_int(num_iteration),
                    c_str(self.pred_parameter),
                    c_str(f.name)))
                lines = f.readlines()
                nrow = len(lines)
                preds = [float(token) for line in lines for token in line.split('\t')]
                preds = np.array(preds, dtype=np.float64, copy=False)
        elif isinstance(data, scipy.sparse.csr_matrix):
            preds, nrow = self.__pred_for_csr(data, num_iteration, predict_type)
        elif isinstance(data, scipy.sparse.csc_matrix):
            preds, nrow = self.__pred_for_csc(data, num_iteration, predict_type)
        elif isinstance(data, np.ndarray):
            preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
        elif isinstance(data, list):
            try:
                data = np.array(data)
            except BaseException:
                raise ValueError('Cannot convert data list to numpy array.')
            preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
        elif isinstance(data, DataTable):
            preds, nrow = self.__pred_for_np2d(data.to_numpy(), num_iteration, predict_type)
        else:
            try:
                warnings.warn('Converting data to scipy sparse matrix.')
                csr = scipy.sparse.csr_matrix(data)
            except BaseException:
                raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
            preds, nrow = self.__pred_for_csr(csr, num_iteration, predict_type)
        if pred_leaf:
            preds = preds.astype(np.int32)
        if is_reshape and preds.size != nrow:
            if preds.size % nrow == 0:
                preds = preds.reshape(nrow, -1)
            else:
                raise ValueError('Length of predict result (%d) cannot be divided by nrow (%d)'
                                 % (preds.size, nrow))
        return preds

    def __get_num_preds(self, num_iteration, nrow, predict_type):
        """Get size of prediction result."""
        if nrow > MAX_INT32:
            raise LightGBMError('LightGBM cannot perform prediction for data '
                                'with number of rows greater than MAX_INT32 (%d).\n'
                                'You can split your data into chunks '
                                'and then concatenate predictions for them' % MAX_INT32)
        n_preds = ctypes.c_int64(0)
        _safe_call(_LIB.LGBM_BoosterCalcNumPredict(
            self.handle,
            ctypes.c_int(nrow),
            ctypes.c_int(predict_type),
            ctypes.c_int(num_iteration),
            ctypes.byref(n_preds)))
        return n_preds.value
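
    # Note on the value computed above: for normal and raw-score prediction the
    # C API typically reports nrow * num_class values, while leaf-index and
    # contribution prediction report more values per row (per-iteration leaf
    # ids and per-feature contributions, respectively).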

    def __pred_for_np2d(self, mat, num_iteration, predict_type):
        """Predict for a 2-D numpy matrix."""
        if len(mat.shape) != 2:
            raise ValueError('Input numpy.ndarray or list must be 2 dimensional')

        def inner_predict(mat, num_iteration, predict_type, preds=None):
            if mat.dtype == np.float32 or mat.dtype == np.float64:
                data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
            else:
                """change non-float data to float data, need to copy"""
                data = np.array(mat.reshape(mat.size), dtype=np.float32)
            ptr_data, type_ptr_data, _ = c_float_array(data)
            n_preds = self.__get_num_preds(num_iteration, mat.shape[0], predict_type)
            if preds is None:
                preds = np.zeros(n_preds, dtype=np.float64)
            elif len(preds.shape) != 1 or len(preds) != n_preds:
                raise ValueError("Wrong length of pre-allocated predict array")
            out_num_preds = ctypes.c_int64(0)
            _safe_call(_LIB.LGBM_BoosterPredictForMat(
                self.handle,
                ptr_data,
                ctypes.c_int(type_ptr_data),
                ctypes.c_int(mat.shape[0]),
                ctypes.c_int(mat.shape[1]),
                ctypes.c_int(C_API_IS_ROW_MAJOR),
                ctypes.c_int(predict_type),
                ctypes.c_int(num_iteration),
                c_str(self.pred_parameter),
                ctypes.byref(out_num_preds),
                preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
            if n_preds != out_num_preds.value:
                raise ValueError("Wrong length for predict results")
            return preds, mat.shape[0]

        nrow = mat.shape[0]
        if nrow > MAX_INT32:
            sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
            # __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
            n_preds = [self.__get_num_preds(num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])]
            n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
            preds = np.zeros(sum(n_preds), dtype=np.float64)
            for chunk, (start_idx_pred, end_idx_pred) in zip_(np.array_split(mat, sections),
                                                              zip_(n_preds_sections, n_preds_sections[1:])):
                # avoid the memory cost of concatenating per-chunk result arrays
                inner_predict(chunk, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
            return preds, nrow
        else:
            return inner_predict(mat, num_iteration, predict_type)
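
    # The MAX_INT32 chunking above pre-allocates one flat result array and has
    # each inner_predict() call write directly into its slice, so the per-chunk
    # results never need to be concatenated afterwards.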

    def __pred_for_csr(self, csr, num_iteration, predict_type):
        """Predict for a CSR data."""
        def inner_predict(csr, num_iteration, predict_type, preds=None):
            nrow = len(csr.indptr) - 1
            n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
            if preds is None:
                preds = np.zeros(n_preds, dtype=np.float64)
            elif len(preds.shape) != 1 or len(preds) != n_preds:
                raise ValueError("Wrong length of pre-allocated predict array")
            out_num_preds = ctypes.c_int64(0)

            ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
            ptr_data, type_ptr_data, _ = c_float_array(csr.data)

            assert csr.shape[1] <= MAX_INT32
            csr.indices = csr.indices.astype(np.int32, copy=False)

            _safe_call(_LIB.LGBM_BoosterPredictForCSR(
                self.handle,
                ptr_indptr,
                ctypes.c_int32(type_ptr_indptr),
                csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
                ptr_data,
                ctypes.c_int(type_ptr_data),
                ctypes.c_int64(len(csr.indptr)),
                ctypes.c_int64(len(csr.data)),
                ctypes.c_int64(csr.shape[1]),
                ctypes.c_int(predict_type),
                ctypes.c_int(num_iteration),
                c_str(self.pred_parameter),
                ctypes.byref(out_num_preds),
                preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
            if n_preds != out_num_preds.value:
                raise ValueError("Wrong length for predict results")
            return preds, nrow

        nrow = len(csr.indptr) - 1
        if nrow > MAX_INT32:
            sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow]
            # __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
            n_preds = [self.__get_num_preds(num_iteration, i, predict_type) for i in np.diff(sections)]
            n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
            preds = np.zeros(sum(n_preds), dtype=np.float64)
            for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip_(zip_(sections, sections[1:]),
                                                                             zip_(n_preds_sections, n_preds_sections[1:])):
                # avoid the memory cost of concatenating per-chunk result arrays
                inner_predict(csr[start_idx:end_idx], num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
            return preds, nrow
        else:
            return inner_predict(csr, num_iteration, predict_type)

    def __pred_for_csc(self, csc, num_iteration, predict_type):
        """Predict for a CSC data."""
        nrow = csc.shape[0]
        if nrow > MAX_INT32:
            return self.__pred_for_csr(csc.tocsr(), num_iteration, predict_type)
        n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
        preds = np.zeros(n_preds, dtype=np.float64)
        out_num_preds = ctypes.c_int64(0)

        ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
        ptr_data, type_ptr_data, _ = c_float_array(csc.data)

        assert csc.shape[0] <= MAX_INT32
        csc.indices = csc.indices.astype(np.int32, copy=False)

        _safe_call(_LIB.LGBM_BoosterPredictForCSC(
            self.handle,
            ptr_indptr,
            ctypes.c_int32(type_ptr_indptr),
            csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
            ptr_data,
            ctypes.c_int(type_ptr_data),
            ctypes.c_int64(len(csc.indptr)),
            ctypes.c_int64(len(csc.data)),
            ctypes.c_int64(csc.shape[0]),
            ctypes.c_int(predict_type),
            ctypes.c_int(num_iteration),
            c_str(self.pred_parameter),
            ctypes.byref(out_num_preds),
            preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
        if n_preds != out_num_preds.value:
            raise ValueError("Wrong length for predict results")
        return preds, nrow


class Dataset(object):
    """Dataset in LightGBM."""

    def __init__(self, data, label=None, reference=None,
                 weight=None, group=None, init_score=None, silent=False,
                 feature_name='auto', categorical_feature='auto', params=None,
                 free_raw_data=True):
        """Initialize Dataset.

        Parameters
        ----------
        data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
            Data source of Dataset.
            If string, it represents the path to txt file.
        label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
            Label of the data.
        reference : Dataset or None, optional (default=None)
            If this is Dataset for validation, training data should be used as reference.
        weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
            Weight for each instance.
        group : list, numpy 1-D array, pandas Series or None, optional (default=None)
            Group/query size for Dataset.
        init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
            Init score for Dataset.
        silent : bool, optional (default=False)
            Whether to print messages during construction.
        feature_name : list of strings or 'auto', optional (default="auto")
            Feature names.
            If 'auto' and data is pandas DataFrame, data columns names are used.
        categorical_feature : list of strings or int, or 'auto', optional (default="auto")
            Categorical features.
            If list of int, interpreted as indices.
            If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
            If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
            All values in categorical features should be less than int32 max value (2147483647).
            Large values could be memory consuming. Consider using consecutive integers starting from zero.
            All negative values in categorical features will be treated as missing values.
        params : dict or None, optional (default=None)
            Other parameters for Dataset.
        free_raw_data : bool, optional (default=True)
            If True, raw data is freed after constructing inner Dataset.
        """
        self.handle = None
        self.data = data
        self.label = label
        self.reference = reference
        self.weight = weight
        self.group = group
        self.init_score = init_score
        self.silent = silent
        self.feature_name = feature_name
        self.categorical_feature = categorical_feature
        self.params = copy.deepcopy(params)
        self.free_raw_data = free_raw_data
        self.used_indices = None
        self.need_slice = True
        self._predictor = None
        self.pandas_categorical = None
        self.params_back_up = None
        self.feature_penalty = None
        self.monotone_constraints = None

    def __del__(self):
        try:
            self._free_handle()
        except AttributeError:
            pass

    def _free_handle(self):
        if self.handle is not None:
            _safe_call(_LIB.LGBM_DatasetFree(self.handle))
            self.handle = None
        self.need_slice = True
        if self.used_indices is not None:
            self.data = None
        return self

    def _set_init_score_by_predictor(self, predictor, data, used_indices=None):
        data_has_header = False
        if isinstance(data, string_type):
            # check data has header or not
            if self.params.get("has_header", False) or self.params.get("header", False):
                data_has_header = True
        init_score = predictor.predict(data,
                                       raw_score=True,
                                       data_has_header=data_has_header,
                                       is_reshape=False)
        num_data = self.num_data()
        if used_indices is not None:
            assert not self.need_slice
            if isinstance(data, string_type):
                sub_init_score = np.zeros(num_data * predictor.num_class, dtype=np.float32)
                assert num_data == len(used_indices)
                for i in range_(len(used_indices)):
                    for j in range_(predictor.num_class):
                        sub_init_score[i * predictor.num_class + j] = init_score[used_indices[i] * predictor.num_class + j]
                init_score = sub_init_score
        if predictor.num_class > 1:
            # need to regroup init_score
            new_init_score = np.zeros(init_score.size, dtype=np.float32)
            for i in range_(num_data):
                for j in range_(predictor.num_class):
                    new_init_score[j * num_data + i] = init_score[i * predictor.num_class + j]
            init_score = new_init_score
        self.set_init_score(init_score)
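
    # Illustration of the regrouping above: with num_class=2 and row-major
    # scores [r0c0, r0c1, r1c0, r1c1] coming back from predict(), the loop
    # produces [r0c0, r1c0, r0c1, r1c1], i.e. all class-0 scores first, which
    # is the class-major layout expected by set_init_score().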

    def _lazy_init(self, data, label=None, reference=None,
                   weight=None, group=None, init_score=None, predictor=None,
                   silent=False, feature_name='auto',
                   categorical_feature='auto', params=None):
        if data is None:
            self.handle = None
            return self
        if reference is not None:
            self.pandas_categorical = reference.pandas_categorical
            categorical_feature = reference.categorical_feature
        data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data,
                                                                                             feature_name,
                                                                                             categorical_feature,
                                                                                             self.pandas_categorical)
        label = _label_from_pandas(label)

        # process for args
        params = {} if params is None else params
        args_names = (getattr(self.__class__, '_lazy_init')
                      .__code__
                      .co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount])
        for key, _ in params.items():
            if key in args_names:
                warnings.warn('{0} keyword has been found in `params` and will be ignored.\n'
                              'Please use {0} argument of the Dataset constructor to pass this parameter.'
                              .format(key))
        # user can set verbose with params, it has higher priority
        if not any(verbose_alias in params for verbose_alias in ('verbose', 'verbosity')) and silent:
            params["verbose"] = -1
        # get categorical features
        if categorical_feature is not None:
            categorical_indices = set()
            feature_dict = {}
            if feature_name is not None:
                feature_dict = {name: i for i, name in enumerate(feature_name)}
            for name in categorical_feature:
                if isinstance(name, string_type) and name in feature_dict:
                    categorical_indices.add(feature_dict[name])
                elif isinstance(name, integer_types):
                    categorical_indices.add(name)
                else:
                    raise TypeError("Wrong type({}) or unknown name({}) in categorical_feature"
                                    .format(type(name).__name__, name))
            if categorical_indices:
                if "categorical_feature" in params or "categorical_column" in params:
                    warnings.warn('categorical_feature in param dict is overridden.')
                    params.pop("categorical_feature", None)
                    params.pop("categorical_column", None)
                params['categorical_column'] = sorted(categorical_indices)

        params_str = param_dict_to_str(params)
        # process for reference dataset
        ref_dataset = None
        if isinstance(reference, Dataset):
            ref_dataset = reference.construct().handle
        elif reference is not None:
            raise TypeError('Reference dataset should be None or dataset instance')
        # start constructing data
        if isinstance(data, string_type):
            self.handle = ctypes.c_void_p()
            _safe_call(_LIB.LGBM_DatasetCreateFromFile(
                c_str(data),
                c_str(params_str),
                ref_dataset,
                ctypes.byref(self.handle)))
        elif isinstance(data, scipy.sparse.csr_matrix):
            self.__init_from_csr(data, params_str, ref_dataset)
        elif isinstance(data, scipy.sparse.csc_matrix):
            self.__init_from_csc(data, params_str, ref_dataset)
        elif isinstance(data, np.ndarray):
            self.__init_from_np2d(data, params_str, ref_dataset)
        elif isinstance(data, list) and len(data) > 0 and all(isinstance(x, np.ndarray) for x in data):
            self.__init_from_list_np2d(data, params_str, ref_dataset)
        elif isinstance(data, DataTable):
            self.__init_from_np2d(data.to_numpy(), params_str, ref_dataset)
        else:
            try:
                csr = scipy.sparse.csr_matrix(data)
                self.__init_from_csr(csr, params_str, ref_dataset)
            except BaseException:
                raise TypeError('Cannot initialize Dataset from {}'.format(type(data).__name__))
        if label is not None:
            self.set_label(label)
        if self.get_label() is None:
            raise ValueError("Label should not be None")
        if weight is not None:
            self.set_weight(weight)
        if group is not None:
            self.set_group(group)
        # load init score
        if init_score is not None:
            self.set_init_score(init_score)
            if predictor is not None:
                warnings.warn("The prediction of init_model will be overridden by init_score.")
        elif isinstance(predictor, _InnerPredictor):
            self._set_init_score_by_predictor(predictor, data)
        elif predictor is not None:
            raise TypeError('Wrong predictor type {}'.format(type(predictor).__name__))
        # set feature names
        return self.set_feature_name(feature_name)

    def __init_from_np2d(self, mat, params_str, ref_dataset):
        """Initialize data from a 2-D numpy matrix."""
        if len(mat.shape) != 2:
            raise ValueError('Input numpy.ndarray must be 2 dimensional')

        self.handle = ctypes.c_void_p()
        if mat.dtype == np.float32 or mat.dtype == np.float64:
            data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
        else:
            # change non-float data to float data, need to copy
            data = np.array(mat.reshape(mat.size), dtype=np.float32)

        ptr_data, type_ptr_data, _ = c_float_array(data)
        _safe_call(_LIB.LGBM_DatasetCreateFromMat(
            ptr_data,
            ctypes.c_int(type_ptr_data),
            ctypes.c_int(mat.shape[0]),
            ctypes.c_int(mat.shape[1]),
            ctypes.c_int(C_API_IS_ROW_MAJOR),
            c_str(params_str),
            ref_dataset,
            ctypes.byref(self.handle)))
        return self

    def __init_from_list_np2d(self, mats, params_str, ref_dataset):
        """Initialize data from a list of 2-D numpy matrices."""
        ncol = mats[0].shape[1]
        nrow = np.zeros((len(mats),), np.int32)
        if mats[0].dtype == np.float64:
            ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
        else:
            ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()

        holders = []
        type_ptr_data = None

        for i, mat in enumerate(mats):
            if len(mat.shape) != 2:
                raise ValueError('Input numpy.ndarray must be 2 dimensional')

            if mat.shape[1] != ncol:
                raise ValueError('Input arrays must have same number of columns')

            nrow[i] = mat.shape[0]

            if mat.dtype == np.float32 or mat.dtype == np.float64:
                mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
            else:
                # change non-float data to float data, need to copy
                mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)

            chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
            if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
                raise ValueError('Input chunks must have same type')
            ptr_data[i] = chunk_ptr_data
            type_ptr_data = chunk_type_ptr_data
            holders.append(holder)

        self.handle = ctypes.c_void_p()
        _safe_call(_LIB.LGBM_DatasetCreateFromMats(
            ctypes.c_int(len(mats)),
            ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
            ctypes.c_int(type_ptr_data),
            nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
            ctypes.c_int(ncol),
            ctypes.c_int(C_API_IS_ROW_MAJOR),
            c_str(params_str),
            ref_dataset,
            ctypes.byref(self.handle)))
        return self

    def __init_from_csr(self, csr, params_str, ref_dataset):
        """Initialize data from a CSR matrix."""
        if len(csr.indices) != len(csr.data):
            raise ValueError('Length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
        self.handle = ctypes.c_void_p()

        ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
        ptr_data, type_ptr_data, _ = c_float_array(csr.data)

        assert csr.shape[1] <= MAX_INT32
        csr.indices = csr.indices.astype(np.int32, copy=False)

        _safe_call(_LIB.LGBM_DatasetCreateFromCSR(
            ptr_indptr,
            ctypes.c_int(type_ptr_indptr),
            csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
            ptr_data,
            ctypes.c_int(type_ptr_data),
            ctypes.c_int64(len(csr.indptr)),
            ctypes.c_int64(len(csr.data)),
            ctypes.c_int64(csr.shape[1]),
            c_str(params_str),
            ref_dataset,
            ctypes.byref(self.handle)))
        return self

    def __init_from_csc(self, csc, params_str, ref_dataset):
        """Initialize data from a CSC matrix."""
        if len(csc.indices) != len(csc.data):
            raise ValueError('Length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
        self.handle = ctypes.c_void_p()

        ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
        ptr_data, type_ptr_data, _ = c_float_array(csc.data)

        assert csc.shape[0] <= MAX_INT32
        csc.indices = csc.indices.astype(np.int32, copy=False)

        _safe_call(_LIB.LGBM_DatasetCreateFromCSC(
            ptr_indptr,
            ctypes.c_int(type_ptr_indptr),
            csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
            ptr_data,
            ctypes.c_int(type_ptr_data),
            ctypes.c_int64(len(csc.indptr)),
            ctypes.c_int64(len(csc.data)),
            ctypes.c_int64(csc.shape[0]),
            c_str(params_str),
            ref_dataset,
            ctypes.byref(self.handle)))
        return self

    def construct(self):
        """Lazy init.

        Returns
        -------
        self : Dataset
            Constructed Dataset object.
        """
        if self.handle is None:
            if self.reference is not None:
                if self.used_indices is None:
                    # create valid
                    self._lazy_init(self.data, label=self.label, reference=self.reference,
                                    weight=self.weight, group=self.group,
                                    init_score=self.init_score, predictor=self._predictor,
                                    silent=self.silent, feature_name=self.feature_name, params=self.params)
                else:
                    # construct subset
                    used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
                    assert used_indices.flags.c_contiguous
                    if self.reference.group is not None:
                        group_info = np.array(self.reference.group).astype(int)
                        _, self.group = np.unique(np.repeat(range_(len(group_info)), repeats=group_info)[self.used_indices],
                                                  return_counts=True)
                    self.handle = ctypes.c_void_p()
                    params_str = param_dict_to_str(self.params)
                    _safe_call(_LIB.LGBM_DatasetGetSubset(
                        self.reference.construct().handle,
                        used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
                        ctypes.c_int(used_indices.shape[0]),
                        c_str(params_str),
                        ctypes.byref(self.handle)))
                    if not self.free_raw_data:
                        self.get_data()
                    if self.group is not None:
                        self.set_group(self.group)
                    if self.get_label() is None:
                        raise ValueError("Label should not be None.")
                    if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor:
                        self.get_data()
                        self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
            else:
1028
                # create train
1029
                self._lazy_init(self.data, label=self.label,
1030
1031
1032
                                weight=self.weight, group=self.group,
                                init_score=self.init_score, predictor=self._predictor,
                                silent=self.silent, feature_name=self.feature_name,
1033
                                categorical_feature=self.categorical_feature, params=self.params)
wxchan's avatar
wxchan committed
1034
1035
1036
            if self.free_raw_data:
                self.data = None
        return self
wxchan's avatar
wxchan committed
1037
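
    # Illustrative sketch (not part of the library): construction is lazy, so
    # the underlying C++ Dataset is only built when ``construct()`` runs,
    # either explicitly or on first use (e.g. by Booster). The synthetic data
    # below is an assumption for demonstration only:
    #
    #   import numpy as np
    #   import lightgbm as lgb
    #   data = lgb.Dataset(np.random.rand(100, 5), label=np.random.randint(2, size=100))
    #   assert data.handle is None      # nothing built yet
    #   data.construct()                # now the C++ Dataset exists
    #   assert data.handle is not None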

    def create_valid(self, data, label=None, weight=None, group=None,
                     init_score=None, silent=False, params=None):
        """Create validation data aligned with the current Dataset.

        Parameters
        ----------
        data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
            Data source of Dataset.
            If string, it represents the path to txt file.
        label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
            Label of the data.
        weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
            Weight for each instance.
        group : list, numpy 1-D array, pandas Series or None, optional (default=None)
            Group/query size for Dataset.
        init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
            Init score for Dataset.
        silent : bool, optional (default=False)
            Whether to print messages during construction.
        params : dict or None, optional (default=None)
            Other parameters for validation Dataset.

        Returns
        -------
        valid : Dataset
            Validation Dataset with reference to self.
        """
        ret = Dataset(data, label=label, reference=self,
                      weight=weight, group=group, init_score=init_score,
                      silent=silent, params=params, free_raw_data=self.free_raw_data)
        ret._predictor = self._predictor
        ret.pandas_categorical = self.pandas_categorical
        return ret
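
    # Illustrative sketch (not part of the library): a validation Dataset must
    # share binning with its training Dataset, which is what ``create_valid``
    # (equivalently, ``reference=``) guarantees. Arrays below are assumptions:
    #
    #   import numpy as np
    #   import lightgbm as lgb
    #   train = lgb.Dataset(np.random.rand(200, 5), label=np.random.randint(2, size=200))
    #   valid = train.create_valid(np.random.rand(50, 5), label=np.random.randint(2, size=50))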

    def subset(self, used_indices, params=None):
        """Get subset of current Dataset.

        Parameters
        ----------
        used_indices : list of int
            Indices used to create the subset.
        params : dict or None, optional (default=None)
            These parameters will be passed to Dataset constructor.

        Returns
        -------
        subset : Dataset
            Subset of the current Dataset.
        """
        if params is None:
            params = self.params
        ret = Dataset(None, reference=self, feature_name=self.feature_name,
                      categorical_feature=self.categorical_feature, params=params,
                      free_raw_data=self.free_raw_data)
        ret._predictor = self._predictor
        ret.pandas_categorical = self.pandas_categorical
        ret.used_indices = used_indices
        return ret
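
    # Illustrative sketch (not part of the library): ``subset`` shares the
    # parent's raw data and slices it lazily at construction time; indices
    # below are assumptions:
    #
    #   import numpy as np
    #   import lightgbm as lgb
    #   full = lgb.Dataset(np.random.rand(100, 5), label=np.random.randint(2, size=100),
    #                      free_raw_data=False)
    #   fold = full.subset(list(range(0, 100, 2)))  # every second row
    #   fold.construct()                            # materializes the subset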

    def save_binary(self, filename):
        """Save Dataset to a binary file.

        Parameters
        ----------
        filename : string
            Name of the output file.

        Returns
        -------
        self : Dataset
            Returns self.
        """
        _safe_call(_LIB.LGBM_DatasetSaveBinary(
            self.construct().handle,
            c_str(filename)))
        return self
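
    # Illustrative sketch (not part of the library): a binary file written by
    # ``save_binary`` can be loaded back by passing its path to ``Dataset``,
    # which skips re-binning; ``train.bin`` is a hypothetical path:
    #
    #   import numpy as np
    #   import lightgbm as lgb
    #   lgb.Dataset(np.random.rand(100, 5),
    #               label=np.random.randint(2, size=100)).save_binary('train.bin')
    #   reloaded = lgb.Dataset('train.bin')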

    def _update_params(self, params):
        if self.handle is not None and params is not None:
            _safe_call(_LIB.LGBM_DatasetUpdateParam(self.handle, c_str(param_dict_to_str(params))))
        if not self.params:
            self.params = params
        else:
            self.params_back_up = copy.deepcopy(self.params)
            self.params.update(params)
        return self

    def _reverse_update_params(self):
        self.params = copy.deepcopy(self.params_back_up)
        self.params_back_up = None
        if self.handle is not None and self.params is not None:
            _safe_call(_LIB.LGBM_DatasetUpdateParam(self.handle, c_str(param_dict_to_str(self.params))))
        return self

    def set_field(self, field_name, data):
        """Set property into the Dataset.

        Parameters
        ----------
        field_name : string
            The field name of the information.
        data : list, numpy 1-D array, pandas Series or None
            The array of data to be set.

        Returns
        -------
        self : Dataset
            Dataset with set property.
        """
        if self.handle is None:
            raise Exception("Cannot set %s before construct Dataset" % field_name)
        if data is None:
            # set to None
            _safe_call(_LIB.LGBM_DatasetSetField(
                self.handle,
                c_str(field_name),
                None,
                ctypes.c_int(0),
                ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
            return self
        dtype = np.float32
        if field_name == 'group':
            dtype = np.int32
        elif field_name == 'init_score':
            dtype = np.float64
        data = list_to_1d_numpy(data, dtype, name=field_name)
        if data.dtype == np.float32 or data.dtype == np.float64:
            ptr_data, type_data, _ = c_float_array(data)
        elif data.dtype == np.int32:
            ptr_data, type_data, _ = c_int_array(data)
        else:
            raise TypeError("Expected np.float32/64 or np.int32, met type({})".format(data.dtype))
        if type_data != FIELD_TYPE_MAPPER[field_name]:
            raise TypeError("Input type error for set_field")
        _safe_call(_LIB.LGBM_DatasetSetField(
            self.handle,
            c_str(field_name),
            ptr_data,
            ctypes.c_int(len(data)),
            ctypes.c_int(type_data)))
        return self

    def get_field(self, field_name):
        """Get property from the Dataset.

        Parameters
        ----------
        field_name : string
            The field name of the information.

        Returns
        -------
        info : numpy array
            A numpy array with information from the Dataset.
        """
        if self.handle is None:
            raise Exception("Cannot get %s before construct Dataset" % field_name)
        tmp_out_len = ctypes.c_int()
        out_type = ctypes.c_int()
        ret = ctypes.POINTER(ctypes.c_void_p)()
        _safe_call(_LIB.LGBM_DatasetGetField(
            self.handle,
            c_str(field_name),
            ctypes.byref(tmp_out_len),
            ctypes.byref(ret),
            ctypes.byref(out_type)))
        if out_type.value != FIELD_TYPE_MAPPER[field_name]:
            raise TypeError("Return type error for get_field")
        if tmp_out_len.value == 0:
            return None
        if out_type.value == C_API_DTYPE_INT32:
            return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
        elif out_type.value == C_API_DTYPE_FLOAT32:
            return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
        elif out_type.value == C_API_DTYPE_FLOAT64:
            return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
        elif out_type.value == C_API_DTYPE_INT8:
            return cint8_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int8)), tmp_out_len.value)
        else:
            raise TypeError("Unknown type")
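
    # Illustrative sketch (not part of the library): ``set_field`` and
    # ``get_field`` round-trip a per-row attribute on a constructed Dataset;
    # 'label' is stored as np.float32, so an exact dtype round-trip is assumed
    # only for float32 input:
    #
    #   import numpy as np
    #   import lightgbm as lgb
    #   ds = lgb.Dataset(np.random.rand(10, 3), label=np.zeros(10)).construct()
    #   ds.set_field('label', np.arange(10, dtype=np.float32))
    #   print(ds.get_field('label'))   # array([0., 1., ..., 9.], dtype=float32)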

    def set_categorical_feature(self, categorical_feature):
        """Set categorical features.

        Parameters
        ----------
        categorical_feature : list of int or strings
            Names or indices of categorical features.

        Returns
        -------
        self : Dataset
            Dataset with set categorical features.
        """
        if self.categorical_feature == categorical_feature:
            return self
        if self.data is not None:
            if self.categorical_feature is None:
                self.categorical_feature = categorical_feature
                return self._free_handle()
            elif categorical_feature == 'auto':
                warnings.warn('Using categorical_feature in Dataset.')
                return self
            else:
                warnings.warn('categorical_feature in Dataset is overridden.\n'
                              'New categorical_feature is {}'.format(sorted(list(categorical_feature))))
                self.categorical_feature = categorical_feature
                return self._free_handle()
        else:
            raise LightGBMError("Cannot set categorical feature after freed raw data, "
                                "set free_raw_data=False when constructing Dataset to avoid this.")

    def _set_predictor(self, predictor):
        """Set predictor for continued training.

        It is not recommended for user to call this function.
        Please use init_model argument in engine.train() or engine.cv() instead.
        """
        if predictor is self._predictor:
            return self
        if self.data is not None or (self.used_indices is not None and self.reference is not None and self.reference.data is not None):
            self._predictor = predictor
            return self._free_handle()
        else:
            raise LightGBMError("Cannot set predictor after freed raw data, "
                                "set free_raw_data=False when constructing Dataset to avoid this.")

    def set_reference(self, reference):
        """Set reference Dataset.

        Parameters
        ----------
        reference : Dataset
            Reference that is used as a template to construct the current Dataset.

        Returns
        -------
        self : Dataset
            Dataset with set reference.
        """
        self.set_categorical_feature(reference.categorical_feature) \
            .set_feature_name(reference.feature_name) \
            ._set_predictor(reference._predictor)
        # we're done if self and reference share a common upstream reference
        if self.get_ref_chain().intersection(reference.get_ref_chain()):
            return self
        if self.data is not None:
            self.reference = reference
            return self._free_handle()
        else:
            raise LightGBMError("Cannot set reference after freed raw data, "
                                "set free_raw_data=False when constructing Dataset to avoid this.")

    def set_feature_name(self, feature_name):
        """Set feature name.

        Parameters
        ----------
        feature_name : list of strings
            Feature names.

        Returns
        -------
        self : Dataset
            Dataset with set feature name.
        """
        if feature_name != 'auto':
            self.feature_name = feature_name
        if self.handle is not None and feature_name is not None and feature_name != 'auto':
            if len(feature_name) != self.num_feature():
                raise ValueError("Length of feature_name({}) and num_feature({}) don't match"
                                 .format(len(feature_name), self.num_feature()))
            c_feature_name = [c_str(name) for name in feature_name]
            _safe_call(_LIB.LGBM_DatasetSetFeatureNames(
                self.handle,
                c_array(ctypes.c_char_p, c_feature_name),
                ctypes.c_int(len(feature_name))))
        return self

    def set_label(self, label):
        """Set label of Dataset.

        Parameters
        ----------
        label : list, numpy 1-D array, pandas Series / one-column DataFrame or None
            The label information to be set into Dataset.

        Returns
        -------
        self : Dataset
            Dataset with set label.
        """
        self.label = label
        if self.handle is not None:
            label = list_to_1d_numpy(_label_from_pandas(label), name='label')
            self.set_field('label', label)
        return self

    def set_weight(self, weight):
        """Set weight of each instance.

        Parameters
        ----------
        weight : list, numpy 1-D array, pandas Series or None
            Weight to be set for each data point.

        Returns
        -------
        self : Dataset
            Dataset with set weight.
        """
        if weight is not None and np.all(weight == 1):
            weight = None
        self.weight = weight
        if self.handle is not None and weight is not None:
            weight = list_to_1d_numpy(weight, name='weight')
            self.set_field('weight', weight)
        return self

    def set_init_score(self, init_score):
        """Set init score of Booster to start from.

        Parameters
        ----------
        init_score : list, numpy 1-D array, pandas Series or None
            Init score for Booster.

        Returns
        -------
        self : Dataset
            Dataset with set init score.
        """
        self.init_score = init_score
        if self.handle is not None and init_score is not None:
            init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
            self.set_field('init_score', init_score)
        return self

    def set_group(self, group):
        """Set group size of Dataset (used for ranking).

        Parameters
        ----------
        group : list, numpy 1-D array, pandas Series or None
            Group size of each group.

        Returns
        -------
        self : Dataset
            Dataset with set group.
        """
        self.group = group
        if self.handle is not None and group is not None:
            group = list_to_1d_numpy(group, np.int32, name='group')
            self.set_field('group', group)
        return self
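
    # Illustrative sketch (not part of the library): for ranking tasks the
    # group sizes must sum to the number of rows; e.g. [10, 20, 70] on 100 rows
    # means query 0 holds rows 0-9, query 1 rows 10-29, query 2 the rest:
    #
    #   import numpy as np
    #   import lightgbm as lgb
    #   ds = lgb.Dataset(np.random.rand(100, 5), label=np.random.randint(2, size=100))
    #   ds.set_group([10, 20, 70])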

    def get_label(self):
        """Get the label of the Dataset.

        Returns
        -------
        label : numpy array or None
            The label information from the Dataset.
        """
        if self.label is None:
            self.label = self.get_field('label')
        return self.label

    def get_weight(self):
        """Get the weight of the Dataset.

        Returns
        -------
        weight : numpy array or None
            Weight for each data point from the Dataset.
        """
        if self.weight is None:
            self.weight = self.get_field('weight')
        return self.weight

    def get_feature_penalty(self):
        """Get the feature penalty of the Dataset.

        Returns
        -------
        feature_penalty : numpy array or None
            Feature penalty for each feature in the Dataset.
        """
        if self.feature_penalty is None:
            self.feature_penalty = self.get_field('feature_penalty')
        return self.feature_penalty

    def get_monotone_constraints(self):
        """Get the monotone constraints of the Dataset.

        Returns
        -------
        monotone_constraints : numpy array or None
            Monotone constraints: -1, 0 or 1, for each feature in the Dataset.
        """
        if self.monotone_constraints is None:
            self.monotone_constraints = self.get_field('monotone_constraints')
        return self.monotone_constraints

    def get_init_score(self):
        """Get the initial score of the Dataset.

        Returns
        -------
        init_score : numpy array or None
            Init score of Booster.
        """
        if self.init_score is None:
            self.init_score = self.get_field('init_score')
        return self.init_score

    def get_data(self):
        """Get the raw data of the Dataset.

        Returns
        -------
        data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None
            Raw data used in the Dataset construction.
        """
        if self.handle is None:
            raise Exception("Cannot get data before construct Dataset")
        if self.need_slice and self.used_indices is not None and self.reference is not None:
            self.data = self.reference.data
            if self.data is not None:
                if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data):
                    self.data = self.data[self.used_indices, :]
                elif isinstance(self.data, DataFrame):
                    self.data = self.data.iloc[self.used_indices].copy()
                elif isinstance(self.data, DataTable):
                    self.data = self.data[self.used_indices, :]
                else:
                    warnings.warn("Cannot subset {} type of raw data.\n"
                                  "Returning original raw data".format(type(self.data).__name__))
            self.need_slice = False
        if self.data is None:
            raise LightGBMError("Cannot call `get_data` after freed raw data, "
                                "set free_raw_data=False when constructing Dataset to avoid this.")
        return self.data

    def get_group(self):
        """Get the group of the Dataset.

        Returns
        -------
        group : numpy array or None
            Group size of each group.
        """
        if self.group is None:
            self.group = self.get_field('group')
            if self.group is not None:
                # group data from LightGBM is boundaries data, need to convert to group size
                self.group = np.diff(self.group)
        return self.group

    def num_data(self):
        """Get the number of rows in the Dataset.

        Returns
        -------
        number_of_rows : int
            The number of rows in the Dataset.
        """
        if self.handle is not None:
            ret = ctypes.c_int()
            _safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
                                                   ctypes.byref(ret)))
            return ret.value
        else:
            raise LightGBMError("Cannot get num_data before construct dataset")

    def num_feature(self):
        """Get the number of columns (features) in the Dataset.

        Returns
        -------
        number_of_columns : int
            The number of columns (features) in the Dataset.
        """
        if self.handle is not None:
            ret = ctypes.c_int()
            _safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
                                                      ctypes.byref(ret)))
            return ret.value
        else:
            raise LightGBMError("Cannot get num_feature before construct dataset")

    def get_ref_chain(self, ref_limit=100):
        """Get a chain of Dataset objects.

        Starts with r, then goes to r.reference (if exists),
        then to r.reference.reference, etc.
        until we hit ``ref_limit`` or a reference loop.

        Parameters
        ----------
        ref_limit : int, optional (default=100)
            The limit number of references.

        Returns
        -------
        ref_chain : set of Dataset
            Chain of references of the Datasets.
        """
        head = self
        ref_chain = set()
        while len(ref_chain) < ref_limit:
            if isinstance(head, Dataset):
                ref_chain.add(head)
                if (head.reference is not None) and (head.reference not in ref_chain):
                    head = head.reference
                else:
                    break
            else:
                break
        return ref_chain

    def add_features_from(self, other):
        """Add features from other Dataset to the current Dataset.

        Both Datasets must be constructed before calling this method.

        Parameters
        ----------
        other : Dataset
            The Dataset to take features from.

        Returns
        -------
        self : Dataset
            Dataset with the new features added.
        """
        if self.handle is None or other.handle is None:
            raise ValueError('Both source and target Datasets must be constructed before adding features')
        _safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle))
        return self
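
    # Illustrative sketch (not part of the library): both Datasets must be
    # constructed first, and the column counts add up afterwards; the
    # synthetic data is an assumption:
    #
    #   import numpy as np
    #   import lightgbm as lgb
    #   a = lgb.Dataset(np.random.rand(100, 3), label=np.random.randint(2, size=100)).construct()
    #   b = lgb.Dataset(np.random.rand(100, 2), label=np.random.randint(2, size=100)).construct()
    #   a.add_features_from(b)
    #   assert a.num_feature() == 5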

    def dump_text(self, filename):
        """Save Dataset to a text file.

        This format cannot be loaded back in by LightGBM, but is useful for debugging purposes.

        Parameters
        ----------
        filename : string
            Name of the output file.

        Returns
        -------
        self : Dataset
            Returns self.
        """
        _safe_call(_LIB.LGBM_DatasetDumpText(
            self.construct().handle,
            c_str(filename)))
        return self


class Booster(object):
    """Booster in LightGBM."""

    def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent=False):
        """Initialize the Booster.

        Parameters
        ----------
        params : dict or None, optional (default=None)
            Parameters for Booster.
        train_set : Dataset or None, optional (default=None)
            Training dataset.
        model_file : string or None, optional (default=None)
            Path to the model file.
        model_str : string or None, optional (default=None)
            Model will be loaded from this string.
        silent : bool, optional (default=False)
            Whether to print messages during construction.
        """
        self.handle = None
        self.network = False
        self.__need_reload_eval_info = True
        self.__train_data_name = "training"
        self.__attr = {}
        self.__set_objective_to_none = False
        self.best_iteration = -1
        self.best_score = {}
        params = {} if params is None else copy.deepcopy(params)
        # user can set verbose with params, it has higher priority
        if not any(verbose_alias in params for verbose_alias in ('verbose', 'verbosity')) and silent:
            params["verbose"] = -1
        if train_set is not None:
            # Training task
            if not isinstance(train_set, Dataset):
                raise TypeError('Training data should be Dataset instance, met {}'
                                .format(type(train_set).__name__))
            params_str = param_dict_to_str(params)
            # set network if necessary
            for alias in ["machines", "workers", "nodes"]:
                if alias in params:
                    machines = params[alias]
                    if isinstance(machines, string_type):
                        num_machines = len(machines.split(','))
                    elif isinstance(machines, (list, set)):
                        num_machines = len(machines)
                        machines = ','.join(machines)
                    else:
                        raise ValueError("Invalid machines in params.")
                    self.set_network(machines,
                                     local_listen_port=params.get("local_listen_port", 12400),
                                     listen_time_out=params.get("listen_time_out", 120),
                                     num_machines=params.get("num_machines", num_machines))
                    break
            # construct booster object
            self.handle = ctypes.c_void_p()
            _safe_call(_LIB.LGBM_BoosterCreate(
                train_set.construct().handle,
                c_str(params_str),
                ctypes.byref(self.handle)))
            # save reference to data
            self.train_set = train_set
            self.valid_sets = []
            self.name_valid_sets = []
            self.__num_dataset = 1
            self.__init_predictor = train_set._predictor
            if self.__init_predictor is not None:
                _safe_call(_LIB.LGBM_BoosterMerge(
                    self.handle,
                    self.__init_predictor.handle))
            out_num_class = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.__num_class = out_num_class.value
            # buffer for inner predict
            self.__inner_predict_buffer = [None]
            self.__is_predicted_cur_iter = [False]
            self.__get_eval_info()
            self.pandas_categorical = train_set.pandas_categorical
        elif model_file is not None:
            # Prediction task
            out_num_iterations = ctypes.c_int(0)
            self.handle = ctypes.c_void_p()
            _safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
                c_str(model_file),
                ctypes.byref(out_num_iterations),
                ctypes.byref(self.handle)))
            out_num_class = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.__num_class = out_num_class.value
            self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
        elif model_str is not None:
            self.model_from_string(model_str, not silent)
        else:
            raise TypeError('Need at least one training dataset or model file or model string '
                            'to create Booster instance')
        self.params = params
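
    # Illustrative sketch (not part of the library): the three mutually
    # exclusive ways to build a Booster, side by side; ``model.txt`` is a
    # hypothetical path and the data is assumed:
    #
    #   import numpy as np
    #   import lightgbm as lgb
    #   train = lgb.Dataset(np.random.rand(100, 5), label=np.random.randint(2, size=100))
    #   bst = lgb.Booster(params={'objective': 'binary'}, train_set=train)  # training task
    #   bst.save_model('model.txt')
    #   bst2 = lgb.Booster(model_file='model.txt')                          # prediction task
    #   bst3 = lgb.Booster(model_str=bst.model_to_string())                 # from string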

    def __del__(self):
        try:
            if self.network:
                self.free_network()
        except AttributeError:
            pass
        try:
            if self.handle is not None:
                _safe_call(_LIB.LGBM_BoosterFree(self.handle))
        except AttributeError:
            pass

    def __copy__(self):
        return self.__deepcopy__(None)

    def __deepcopy__(self, _):
        model_str = self.model_to_string(num_iteration=-1)
        booster = Booster(model_str=model_str)
        return booster

    def __getstate__(self):
        this = self.__dict__.copy()
        handle = this['handle']
        this.pop('train_set', None)
        this.pop('valid_sets', None)
        if handle is not None:
            this["handle"] = self.model_to_string(num_iteration=-1)
        return this

    def __setstate__(self, state):
        model_str = state.get('handle', None)
        if model_str is not None:
            handle = ctypes.c_void_p()
            out_num_iterations = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterLoadModelFromString(
                c_str(model_str),
                ctypes.byref(out_num_iterations),
                ctypes.byref(handle)))
            state['handle'] = handle
        self.__dict__.update(state)
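
    # Illustrative sketch (not part of the library): because __getstate__
    # swaps the C handle for the model string (and drops the Datasets), a
    # Booster pickles and unpickles cleanly; ``bst`` is an assumed trained
    # Booster:
    #
    #   import pickle
    #   blob = pickle.dumps(bst)
    #   bst_restored = pickle.loads(blob)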

    def free_dataset(self):
        """Free Booster's Datasets.

        Returns
        -------
        self : Booster
            Booster without Datasets.
        """
        self.__dict__.pop('train_set', None)
        self.__dict__.pop('valid_sets', None)
        self.__num_dataset = 0
        return self

    def _free_buffer(self):
        self.__inner_predict_buffer = []
        self.__is_predicted_cur_iter = []
        return self

    def set_network(self, machines, local_listen_port=12400,
                    listen_time_out=120, num_machines=1):
        """Set the network configuration.

        Parameters
        ----------
        machines : list, set or string
            Names of machines.
        local_listen_port : int, optional (default=12400)
            TCP listen port for local machines.
        listen_time_out : int, optional (default=120)
            Socket time-out in minutes.
        num_machines : int, optional (default=1)
            The number of machines for parallel learning application.

        Returns
        -------
        self : Booster
            Booster with set network.
        """
        _safe_call(_LIB.LGBM_NetworkInit(c_str(machines),
                                         ctypes.c_int(local_listen_port),
                                         ctypes.c_int(listen_time_out),
                                         ctypes.c_int(num_machines)))
        self.network = True
        return self

    def free_network(self):
        """Free Booster's network.

        Returns
        -------
        self : Booster
            Booster with freed network.
        """
        _safe_call(_LIB.LGBM_NetworkFree())
        self.network = False
        return self
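
    # Illustrative sketch (not part of the library): a two-machine parallel
    # setup; host names and ports below are assumptions, and ``free_network``
    # should be called once training is done:
    #
    #   bst.set_network('host1:12400,host2:12400', local_listen_port=12400,
    #                   listen_time_out=120, num_machines=2)
    #   ...  # distributed training happens here
    #   bst.free_network()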

    def set_train_data_name(self, name):
        """Set the name to the training Dataset.

        Parameters
        ----------
        name : string
            Name for the training Dataset.

        Returns
        -------
        self : Booster
            Booster with set training Dataset name.
        """
        self.__train_data_name = name
        return self

    def add_valid(self, data, name):
        """Add validation data.

        Parameters
        ----------
        data : Dataset
            Validation data.
        name : string
            Name of validation data.

        Returns
        -------
        self : Booster
            Booster with set validation data.
        """
        if not isinstance(data, Dataset):
            raise TypeError('Validation data should be Dataset instance, met {}'
                            .format(type(data).__name__))
        if data._predictor is not self.__init_predictor:
            raise LightGBMError("Add validation data failed, "
                                "you should use same predictor for these data")
        _safe_call(_LIB.LGBM_BoosterAddValidData(
            self.handle,
            data.construct().handle))
        self.valid_sets.append(data)
        self.name_valid_sets.append(name)
        self.__num_dataset += 1
        self.__inner_predict_buffer.append(None)
        self.__is_predicted_cur_iter.append(False)
        return self

    def reset_parameter(self, params):
        """Reset parameters of Booster.

        Parameters
        ----------
        params : dict
            New parameters for Booster.

        Returns
        -------
        self : Booster
            Booster with new parameters.
        """
        if any(metric_alias in params for metric_alias in ('metric', 'metrics', 'metric_types')):
            self.__need_reload_eval_info = True
        params_str = param_dict_to_str(params)
        if params_str:
            _safe_call(_LIB.LGBM_BoosterResetParameter(
                self.handle,
                c_str(params_str)))
        self.params.update(params)
        return self

    def update(self, train_set=None, fobj=None):
        """Update Booster for one iteration.

        Parameters
        ----------
        train_set : Dataset or None, optional (default=None)
            Training data.
            If None, last training data is used.
        fobj : callable or None, optional (default=None)
            Customized objective function.
            Should accept two parameters: preds, train_data,
            and return (grad, hess).

                preds : list or numpy 1-D array
                    The predicted values.
                train_data : Dataset
                    The training dataset.
                grad : list or numpy 1-D array
                    The value of the first order derivative (gradient) for each sample point.
                hess : list or numpy 1-D array
                    The value of the second order derivative (Hessian) for each sample point.

            For multi-class task, the preds are grouped by class_id first, then grouped by row_id.
            If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
            and you should group grad and hess in this way as well.

        Returns
        -------
        is_finished : bool
            Whether the update was successfully finished.
        """
        # need reset training data
        if train_set is not None and train_set is not self.train_set:
            if not isinstance(train_set, Dataset):
                raise TypeError('Training data should be Dataset instance, met {}'
                                .format(type(train_set).__name__))
            if train_set._predictor is not self.__init_predictor:
                raise LightGBMError("Replace training data failed, "
                                    "you should use same predictor for these data")
            self.train_set = train_set
            _safe_call(_LIB.LGBM_BoosterResetTrainingData(
                self.handle,
                self.train_set.construct().handle))
            self.__inner_predict_buffer[0] = None
        is_finished = ctypes.c_int(0)
        if fobj is None:
            if self.__set_objective_to_none:
                raise LightGBMError('Cannot update due to null objective function.')
            _safe_call(_LIB.LGBM_BoosterUpdateOneIter(
                self.handle,
                ctypes.byref(is_finished)))
            self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
            return is_finished.value == 1
        else:
            if not self.__set_objective_to_none:
                self.reset_parameter({"objective": "none"}).__set_objective_to_none = True
            grad, hess = fobj(self.__inner_predict(0), self.train_set)
            return self.__boost(grad, hess)
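
    # Illustrative sketch (not part of the library): a custom objective passed
    # to ``update``. For binary log-loss on raw-score preds, the per-sample
    # gradient is sigmoid(pred) - label and the Hessian is
    # sigmoid(pred) * (1 - sigmoid(pred)); ``bst`` is an assumed Booster:
    #
    #   import numpy as np
    #
    #   def logistic_obj(preds, train_data):
    #       y = train_data.get_label()
    #       p = 1.0 / (1.0 + np.exp(-preds))
    #       return p - y, p * (1.0 - p)
    #
    #   bst.update(fobj=logistic_obj)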

    def __boost(self, grad, hess):
        """Boost Booster for one iteration with customized gradient statistics.

        Note
        ----
        For multi-class task, the score is grouped by class_id first, then grouped by row_id.
        If you want to get i-th row score in j-th class, the access way is score[j * num_data + i]
        and you should group grad and hess in this way as well.

        Parameters
        ----------
        grad : list or numpy 1-D array
            The first order derivative (gradient).
        hess : list or numpy 1-D array
            The second order derivative (Hessian).

        Returns
        -------
        is_finished : bool
            Whether the boost was successfully finished.
        """
        grad = list_to_1d_numpy(grad, name='gradient')
        hess = list_to_1d_numpy(hess, name='hessian')
        assert grad.flags.c_contiguous
        assert hess.flags.c_contiguous
        if len(grad) != len(hess):
            raise ValueError("Lengths of gradient({}) and hessian({}) don't match"
                             .format(len(grad), len(hess)))
        is_finished = ctypes.c_int(0)
        _safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
            self.handle,
            grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
            hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
            ctypes.byref(is_finished)))
        self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
        return is_finished.value == 1

    def rollback_one_iter(self):
        """Rollback one iteration.

        Returns
        -------
        self : Booster
            Booster with rolled back one iteration.
        """
        _safe_call(_LIB.LGBM_BoosterRollbackOneIter(
            self.handle))
        self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
        return self

    def current_iteration(self):
        """Get the index of the current iteration.

        Returns
        -------
        cur_iter : int
            The index of the current iteration.
        """
        out_cur_iter = ctypes.c_int(0)
        _safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
            self.handle,
            ctypes.byref(out_cur_iter)))
        return out_cur_iter.value

    def num_model_per_iteration(self):
        """Get number of models per iteration.

        Returns
        -------
        model_per_iter : int
            The number of models per iteration.
        """
        model_per_iter = ctypes.c_int(0)
        _safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
            self.handle,
            ctypes.byref(model_per_iter)))
        return model_per_iter.value

    def num_trees(self):
        """Get number of weak sub-models.

        Returns
        -------
        num_trees : int
            The number of weak sub-models.
        """
        num_trees = ctypes.c_int(0)
        _safe_call(_LIB.LGBM_BoosterNumberOfTotalModel(
            self.handle,
            ctypes.byref(num_trees)))
        return num_trees.value

    def eval(self, data, name, feval=None):
        """Evaluate for data.

        Parameters
        ----------
        data : Dataset
            Data for the evaluating.
        name : string
            Name of the data.
        feval : callable or None, optional (default=None)
            Customized evaluation function.
            Should accept two parameters: preds, eval_data,
            and return (eval_name, eval_result, is_higher_better) or list of such tuples.

                preds : list or numpy 1-D array
                    The predicted values.
                eval_data : Dataset
                    The evaluation dataset.
                eval_name : string
                    The name of evaluation function.
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.

            For multi-class task, the preds are grouped by class_id first, then grouped by row_id.
            If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].

        Returns
        -------
        result : list
            List with evaluation results.
        """
        if not isinstance(data, Dataset):
            raise TypeError("Can only eval for Dataset instance")
        data_idx = -1
        if data is self.train_set:
            data_idx = 0
        else:
            for i in range_(len(self.valid_sets)):
                if data is self.valid_sets[i]:
                    data_idx = i + 1
                    break
        # need to push new valid data
        if data_idx == -1:
            self.add_valid(data, name)
            data_idx = self.__num_dataset - 1
        return self.__inner_eval(name, data_idx, feval)
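
    # Illustrative sketch (not part of the library): a custom metric passed as
    # ``feval``; it returns (name, value, is_higher_better). ``bst`` and
    # ``valid`` are an assumed Booster and validation Dataset:
    #
    #   import numpy as np
    #
    #   def mean_abs_error(preds, eval_data):
    #       return 'mae', np.mean(np.abs(eval_data.get_label() - preds)), False
    #
    #   bst.eval(valid, 'valid', feval=mean_abs_error)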

    def eval_train(self, feval=None):
        """Evaluate for training data.

        Parameters
        ----------
        feval : callable or None, optional (default=None)
            Customized evaluation function.
            Should accept two parameters: preds, train_data,
            and return (eval_name, eval_result, is_higher_better) or list of such tuples.

                preds : list or numpy 1-D array
                    The predicted values.
                train_data : Dataset
                    The training dataset.
                eval_name : string
                    The name of evaluation function.
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.

            For multi-class task, the preds are grouped by class_id first, then grouped by row_id.
            If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].

        Returns
        -------
        result : list
            List with evaluation results.
        """
        return self.__inner_eval(self.__train_data_name, 0, feval)

    def eval_valid(self, feval=None):
        """Evaluate for validation data.

        Parameters
        ----------
        feval : callable or None, optional (default=None)
            Customized evaluation function.
            Should accept two parameters: preds, valid_data,
            and return (eval_name, eval_result, is_higher_better) or list of such tuples.

                preds : list or numpy 1-D array
                    The predicted values.
                valid_data : Dataset
                    The validation dataset.
                eval_name : string
                    The name of evaluation function.
                eval_result : float
                    The eval result.
                is_higher_better : bool
                    Is eval result higher better, e.g. AUC is ``is_higher_better``.

            For multi-class task, the preds are grouped by class_id first, then grouped by row_id.
            If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].

        Returns
        -------
        result : list
            List with evaluation results.
        """
        return [item for i in range_(1, self.__num_dataset)
                for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]

    def save_model(self, filename, num_iteration=None, start_iteration=0):
        """Save Booster to file.

        Parameters
        ----------
        filename : string
            Filename to save Booster.
        num_iteration : int or None, optional (default=None)
            Index of the iteration that should be saved.
            If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
            If <= 0, all iterations are saved.
        start_iteration : int, optional (default=0)
            Start index of the iteration that should be saved.

        Returns
        -------
        self : Booster
            Returns self.
        """
        if num_iteration is None:
            num_iteration = self.best_iteration
        _safe_call(_LIB.LGBM_BoosterSaveModel(
            self.handle,
            ctypes.c_int(start_iteration),
            ctypes.c_int(num_iteration),
            c_str(filename)))
        _dump_pandas_categorical(self.pandas_categorical, filename)
        return self
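
    # Illustrative sketch (not part of the original source): persisting only the
    # first 10 iterations of a trained Booster ``bst``; the file path is
    # hypothetical, and Booster(model_file=...) is assumed to be available.
    #
    #   bst.save_model('model.txt', num_iteration=10)
    #   bst2 = Booster(model_file='model.txt')  # reload for prediction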

    def shuffle_models(self, start_iteration=0, end_iteration=-1):
        """Shuffle models.

        Parameters
        ----------
        start_iteration : int, optional (default=0)
            The first iteration that will be shuffled.
        end_iteration : int, optional (default=-1)
            The last iteration that will be shuffled.
            If <= 0, the last available iteration is used.

        Returns
        -------
        self : Booster
            Booster with shuffled models.
        """
        _safe_call(_LIB.LGBM_BoosterShuffleModels(
            self.handle,
            ctypes.c_int(start_iteration),
            ctypes.c_int(end_iteration)))
        return self
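
    # Illustrative sketch (not part of the original source): shuffling the
    # order of the first 20 trees of a trained Booster ``bst`` in place.
    #
    #   bst.shuffle_models(start_iteration=0, end_iteration=20)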

    def model_from_string(self, model_str, verbose=True):
        """Load Booster from a string.

        Parameters
        ----------
        model_str : string
            Model will be loaded from this string.
        verbose : bool, optional (default=True)
            Whether to print messages while loading model.

        Returns
        -------
        self : Booster
            Loaded Booster object.
        """
        if self.handle is not None:
            _safe_call(_LIB.LGBM_BoosterFree(self.handle))
        self._free_buffer()
        self.handle = ctypes.c_void_p()
        out_num_iterations = ctypes.c_int(0)
        _safe_call(_LIB.LGBM_BoosterLoadModelFromString(
            c_str(model_str),
            ctypes.byref(out_num_iterations),
            ctypes.byref(self.handle)))
        out_num_class = ctypes.c_int(0)
        _safe_call(_LIB.LGBM_BoosterGetNumClasses(
            self.handle,
            ctypes.byref(out_num_class)))
        if verbose:
            print('Finished loading model, total used %d iterations' % int(out_num_iterations.value))
        self.__num_class = out_num_class.value
        self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
        return self

    def model_to_string(self, num_iteration=None, start_iteration=0):
        """Save Booster to string.

        Parameters
        ----------
        num_iteration : int or None, optional (default=None)
            Index of the iteration that should be saved.
            If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
            If <= 0, all iterations are saved.
        start_iteration : int, optional (default=0)
            Start index of the iteration that should be saved.

        Returns
        -------
        str_repr : string
            String representation of Booster.
        """
        if num_iteration is None:
            num_iteration = self.best_iteration
        buffer_len = 1 << 20
        tmp_out_len = ctypes.c_int64(0)
        string_buffer = ctypes.create_string_buffer(buffer_len)
        ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
        _safe_call(_LIB.LGBM_BoosterSaveModelToString(
            self.handle,
            ctypes.c_int(start_iteration),
            ctypes.c_int(num_iteration),
            ctypes.c_int64(buffer_len),
            ctypes.byref(tmp_out_len),
            ptr_string_buffer))
        actual_len = tmp_out_len.value
        # if buffer length is not long enough, re-allocate a buffer
        if actual_len > buffer_len:
            string_buffer = ctypes.create_string_buffer(actual_len)
            ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
            _safe_call(_LIB.LGBM_BoosterSaveModelToString(
                self.handle,
                ctypes.c_int(start_iteration),
                ctypes.c_int(num_iteration),
                ctypes.c_int64(actual_len),
                ctypes.byref(tmp_out_len),
                ptr_string_buffer))
        ret = string_buffer.value.decode()
        ret += _dump_pandas_categorical(self.pandas_categorical)
        return ret
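
    # Illustrative sketch (not part of the original source): round-tripping a
    # trained Booster ``bst`` through its string form; Booster(model_str=...)
    # is assumed to be an available constructor parameter in this module.
    #
    #   model_str = bst.model_to_string(num_iteration=-1)  # keep all iterations
    #   bst2 = Booster(model_str=model_str)                # rebuild from string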

    def dump_model(self, num_iteration=None, start_iteration=0):
        """Dump Booster to JSON format.

        Parameters
        ----------
        num_iteration : int or None, optional (default=None)
            Index of the iteration that should be dumped.
            If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
            If <= 0, all iterations are dumped.
        start_iteration : int, optional (default=0)
            Start index of the iteration that should be dumped.

        Returns
        -------
        json_repr : dict
            JSON format of Booster.
        """
        if num_iteration is None:
            num_iteration = self.best_iteration
        buffer_len = 1 << 20
        tmp_out_len = ctypes.c_int64(0)
        string_buffer = ctypes.create_string_buffer(buffer_len)
        ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
        _safe_call(_LIB.LGBM_BoosterDumpModel(
            self.handle,
            ctypes.c_int(start_iteration),
            ctypes.c_int(num_iteration),
            ctypes.c_int64(buffer_len),
            ctypes.byref(tmp_out_len),
            ptr_string_buffer))
        actual_len = tmp_out_len.value
        # if buffer length is not long enough, reallocate a buffer
        if actual_len > buffer_len:
            string_buffer = ctypes.create_string_buffer(actual_len)
            ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
            _safe_call(_LIB.LGBM_BoosterDumpModel(
                self.handle,
                ctypes.c_int(start_iteration),
                ctypes.c_int(num_iteration),
                ctypes.c_int64(actual_len),
                ctypes.byref(tmp_out_len),
                ptr_string_buffer))
        ret = json.loads(string_buffer.value.decode())
        ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
                                                          default=json_default_with_numpy))
        return ret
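
    # Illustrative sketch (not part of the original source): inspecting the JSON
    # dump of a trained Booster ``bst``; the keys used below are the ones this
    # module itself reads in get_split_value_histogram().
    #
    #   model_json = bst.dump_model()
    #   print(model_json['feature_names'])
    #   n_trees = len(model_json['tree_info'])  # one entry per tree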

    def predict(self, data, num_iteration=None,
                raw_score=False, pred_leaf=False, pred_contrib=False,
                data_has_header=False, is_reshape=True, **kwargs):
        """Make a prediction.

        Parameters
        ----------
        data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
            Data source for prediction.
            If string, it represents the path to a txt file.
        num_iteration : int or None, optional (default=None)
            Limit number of iterations in the prediction.
            If None, if the best iteration exists, it is used; otherwise, all iterations are used.
            If <= 0, all iterations are used (no limits).
        raw_score : bool, optional (default=False)
            Whether to predict raw scores.
        pred_leaf : bool, optional (default=False)
            Whether to predict leaf index.
        pred_contrib : bool, optional (default=False)
            Whether to predict feature contributions.

            Note
            ----
            If you want to get more explanations for your model's predictions using SHAP values,
            like SHAP interaction values,
            you can install the shap package (https://github.com/slundberg/shap).
            Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
            column, where the last column is the expected value.

        data_has_header : bool, optional (default=False)
            Whether the data has a header.
            Used only if data is string.
        is_reshape : bool, optional (default=True)
            If True, result is reshaped to [nrow, ncol].
        **kwargs
            Other parameters for the prediction.

        Returns
        -------
        result : numpy array
            Prediction result.
        """
        predictor = self._to_predictor(copy.deepcopy(kwargs))
        if num_iteration is None:
            num_iteration = self.best_iteration
        return predictor.predict(data, num_iteration,
                                 raw_score, pred_leaf, pred_contrib,
                                 data_has_header, is_reshape)
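
    # Illustrative sketch (not part of the original source): the main prediction
    # modes of a trained Booster ``bst``; ``X`` is a hypothetical feature matrix.
    #
    #   y_pred = bst.predict(X)                      # transformed scores
    #   y_raw = bst.predict(X, raw_score=True)       # raw margin scores
    #   leaves = bst.predict(X, pred_leaf=True)      # leaf indices, one column per tree
    #   contrib = bst.predict(X, pred_contrib=True)  # per-feature contributions
    #                                                # plus expected-value column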

    def refit(self, data, label, decay_rate=0.9, **kwargs):
        """Refit the existing Booster by new data.

        Parameters
        ----------
        data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
            Data source for refit.
            If string, it represents the path to a txt file.
        label : list, numpy 1-D array or pandas Series / one-column DataFrame
            Label for refit.
        decay_rate : float, optional (default=0.9)
            Decay rate of refit,
            will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees.
        **kwargs
            Other parameters for refit.
            These parameters will be passed to ``predict`` method.

        Returns
        -------
        result : Booster
            Refitted Booster.
        """
        if self.__set_objective_to_none:
            raise LightGBMError('Cannot refit due to null objective function.')
        predictor = self._to_predictor(copy.deepcopy(kwargs))
        leaf_preds = predictor.predict(data, -1, pred_leaf=True)
        nrow, ncol = leaf_preds.shape
        train_set = Dataset(data, label, silent=True)
        new_params = copy.deepcopy(self.params)
        new_params['refit_decay_rate'] = decay_rate
        new_booster = Booster(new_params, train_set, silent=True)
        # Copy models
        _safe_call(_LIB.LGBM_BoosterMerge(
            new_booster.handle,
            predictor.handle))
        leaf_preds = leaf_preds.reshape(-1)
        ptr_data, type_ptr_data, _ = c_int_array(leaf_preds)
        _safe_call(_LIB.LGBM_BoosterRefit(
            new_booster.handle,
            ptr_data,
            ctypes.c_int(nrow),
            ctypes.c_int(ncol)))
        new_booster.network = self.network
        new_booster.__attr = self.__attr.copy()
        return new_booster
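
    # Illustrative sketch (not part of the original source): refitting the leaf
    # values of a trained Booster ``bst`` on fresh data; ``X_new`` and ``y_new``
    # are hypothetical.
    #
    #   bst_refitted = bst.refit(X_new, y_new, decay_rate=0.9)
    #   # each leaf becomes 0.9 * old_leaf_output + 0.1 * new_leaf_output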

    def get_leaf_output(self, tree_id, leaf_id):
        """Get the output of a leaf.

        Parameters
        ----------
        tree_id : int
            The index of the tree.
        leaf_id : int
            The index of the leaf in the tree.

        Returns
        -------
        result : float
            The output of the leaf.
        """
        ret = ctypes.c_double(0)
        _safe_call(_LIB.LGBM_BoosterGetLeafValue(
            self.handle,
            ctypes.c_int(tree_id),
            ctypes.c_int(leaf_id),
            ctypes.byref(ret)))
        return ret.value
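
    # Illustrative sketch (not part of the original source): combining
    # predict(pred_leaf=True) with get_leaf_output(); ``X`` is hypothetical.
    #
    #   leaves = bst.predict(X, pred_leaf=True)  # [nrow, ntrees] leaf indices
    #   # output of the leaf reached by row 0 in tree 0:
    #   value = bst.get_leaf_output(tree_id=0, leaf_id=int(leaves[0, 0]))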

    def _to_predictor(self, pred_parameter=None):
        """Convert to predictor."""
        predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter)
        predictor.pandas_categorical = self.pandas_categorical
        return predictor

    def num_feature(self):
        """Get number of features.

        Returns
        -------
        num_feature : int
            The number of features.
        """
        out_num_feature = ctypes.c_int(0)
        _safe_call(_LIB.LGBM_BoosterGetNumFeature(
            self.handle,
            ctypes.byref(out_num_feature)))
        return out_num_feature.value

    def feature_name(self):
        """Get names of features.

        Returns
        -------
        result : list
            List with names of features.
        """
        num_feature = self.num_feature()
        # Get name of features
        tmp_out_len = ctypes.c_int(0)
        string_buffers = [ctypes.create_string_buffer(255) for i in range_(num_feature)]
        ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
        _safe_call(_LIB.LGBM_BoosterGetFeatureNames(
            self.handle,
            ctypes.byref(tmp_out_len),
            ptr_string_buffers))
        if num_feature != tmp_out_len.value:
            raise ValueError("Length of feature names doesn't equal with num_feature")
        return [string_buffers[i].value.decode() for i in range_(num_feature)]

    def feature_importance(self, importance_type='split', iteration=None):
        """Get feature importances.

        Parameters
        ----------
        importance_type : string, optional (default="split")
            How the importance is calculated.
            If "split", result contains numbers of times the feature is used in a model.
            If "gain", result contains total gains of splits which use the feature.
        iteration : int or None, optional (default=None)
            Limit number of iterations in the feature importance calculation.
            If None, if the best iteration exists, it is used; otherwise, all trees are used.
            If <= 0, all trees are used (no limits).

        Returns
        -------
        result : numpy array
            Array with feature importances.
        """
        if iteration is None:
            iteration = self.best_iteration
        if importance_type == "split":
            importance_type_int = 0
        elif importance_type == "gain":
            importance_type_int = 1
        else:
            importance_type_int = -1
        result = np.zeros(self.num_feature(), dtype=np.float64)
        _safe_call(_LIB.LGBM_BoosterFeatureImportance(
            self.handle,
            ctypes.c_int(iteration),
            ctypes.c_int(importance_type_int),
            result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
        if importance_type_int == 0:
            return result.astype(int)
        else:
            return result
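
    # Illustrative sketch (not part of the original source): ranking the
    # features of a trained Booster ``bst`` by total split gain.
    #
    #   gains = bst.feature_importance(importance_type='gain')
    #   ranked = sorted(zip(bst.feature_name(), gains), key=lambda x: x[1], reverse=True)
    #   print(ranked[:5])  # top five features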

    def get_split_value_histogram(self, feature, bins=None, xgboost_style=False):
        """Get split value histogram for the specified feature.

        Parameters
        ----------
        feature : int or string
            The feature name or index the histogram is calculated for.
            If int, interpreted as index.
            If string, interpreted as name.

            Note
            ----
            Categorical features are not supported.

        bins : int, string or None, optional (default=None)
            The maximum number of bins.
            If None, or int and > number of unique split values and ``xgboost_style=True``,
            the number of bins equals number of unique split values.
            If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
        xgboost_style : bool, optional (default=False)
            Whether the returned result should be in the same form as it is in XGBoost.
            If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function.
            If True, the returned value is matrix, in which the first column is the right edges of non-empty bins
            and the second one is the histogram values.

        Returns
        -------
        result_tuple : tuple of 2 numpy arrays
            If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature
            and the bin edges.
        result_array_like : numpy array or pandas DataFrame (if pandas is installed)
            If ``xgboost_style=True``, the histogram of used splitting values for the specified feature.
        """
        def add(root):
            """Recursively add thresholds."""
            if 'split_index' in root:  # non-leaf
                if feature_names is not None and isinstance(feature, string_type):
                    split_feature = feature_names[root['split_feature']]
                else:
                    split_feature = root['split_feature']
                if split_feature == feature:
                    if isinstance(root['threshold'], string_type):
                        raise LightGBMError('Cannot compute split value histogram for the categorical feature')
                    else:
                        values.append(root['threshold'])
                add(root['left_child'])
                add(root['right_child'])

        model = self.dump_model()
        feature_names = model.get('feature_names')
        tree_infos = model['tree_info']
        values = []
        for tree_info in tree_infos:
            add(tree_info['tree_structure'])

        if bins is None or isinstance(bins, integer_types) and xgboost_style:
            n_unique = len(np.unique(values))
            bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
        hist, bin_edges = np.histogram(values, bins=bins)
        if xgboost_style:
            ret = np.column_stack((bin_edges[1:], hist))
            ret = ret[ret[:, 1] > 0]
            if PANDAS_INSTALLED:
                return DataFrame(ret, columns=['SplitValue', 'Count'])
            else:
                return ret
        else:
            return hist, bin_edges
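
    # Illustrative sketch (not part of the original source): both output styles
    # of get_split_value_histogram() for a hypothetical feature named 'f0'.
    #
    #   hist, bin_edges = bst.get_split_value_histogram('f0', bins=10)
    #   df = bst.get_split_value_histogram('f0', xgboost_style=True)
    #   # df has columns ['SplitValue', 'Count'] when pandas is installed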

    def __inner_eval(self, data_name, data_idx, feval=None):
        """Evaluate training or validation data."""
        if data_idx >= self.__num_dataset:
            raise ValueError("Data_idx should be smaller than number of dataset")
        self.__get_eval_info()
        ret = []
        if self.__num_inner_eval > 0:
            result = np.zeros(self.__num_inner_eval, dtype=np.float64)
            tmp_out_len = ctypes.c_int(0)
            _safe_call(_LIB.LGBM_BoosterGetEval(
                self.handle,
                ctypes.c_int(data_idx),
                ctypes.byref(tmp_out_len),
                result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
            if tmp_out_len.value != self.__num_inner_eval:
                raise ValueError("Wrong length of eval results")
            for i in range_(self.__num_inner_eval):
                ret.append((data_name, self.__name_inner_eval[i],
                            result[i], self.__higher_better_inner_eval[i]))
        if feval is not None:
            if data_idx == 0:
                cur_data = self.train_set
            else:
                cur_data = self.valid_sets[data_idx - 1]
            feval_ret = feval(self.__inner_predict(data_idx), cur_data)
            if isinstance(feval_ret, list):
                for eval_name, val, is_higher_better in feval_ret:
                    ret.append((data_name, eval_name, val, is_higher_better))
            else:
                eval_name, val, is_higher_better = feval_ret
                ret.append((data_name, eval_name, val, is_higher_better))
        return ret

    def __inner_predict(self, data_idx):
        """Predict for training and validation dataset."""
        if data_idx >= self.__num_dataset:
            raise ValueError("Data_idx should be smaller than number of dataset")
        if self.__inner_predict_buffer[data_idx] is None:
            if data_idx == 0:
                n_preds = self.train_set.num_data() * self.__num_class
            else:
                n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
            self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64)
        # avoid predicting many times in one iteration
        if not self.__is_predicted_cur_iter[data_idx]:
            tmp_out_len = ctypes.c_int64(0)
            data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
            _safe_call(_LIB.LGBM_BoosterGetPredict(
                self.handle,
                ctypes.c_int(data_idx),
                ctypes.byref(tmp_out_len),
                data_ptr))
            if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
                raise ValueError("Wrong length of predict results for data %d" % (data_idx))
            self.__is_predicted_cur_iter[data_idx] = True
        return self.__inner_predict_buffer[data_idx]

    def __get_eval_info(self):
        """Get inner evaluation count and names."""
        if self.__need_reload_eval_info:
            self.__need_reload_eval_info = False
            out_num_eval = ctypes.c_int(0)
            # Get num of inner evals
            _safe_call(_LIB.LGBM_BoosterGetEvalCounts(
                self.handle,
                ctypes.byref(out_num_eval)))
            self.__num_inner_eval = out_num_eval.value
            if self.__num_inner_eval > 0:
                # Get name of evals
                tmp_out_len = ctypes.c_int(0)
                string_buffers = [ctypes.create_string_buffer(255) for i in range_(self.__num_inner_eval)]
                ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
                _safe_call(_LIB.LGBM_BoosterGetEvalNames(
                    self.handle,
                    ctypes.byref(tmp_out_len),
                    ptr_string_buffers))
                if self.__num_inner_eval != tmp_out_len.value:
                    raise ValueError("Length of eval names doesn't equal with num_evals")
                self.__name_inner_eval = \
                    [string_buffers[i].value.decode() for i in range_(self.__num_inner_eval)]
                self.__higher_better_inner_eval = \
                    [name.startswith(('auc', 'ndcg@', 'map@')) for name in self.__name_inner_eval]

    def attr(self, key):
        """Get attribute string from the Booster.

        Parameters
        ----------
        key : string
            The name of the attribute.

        Returns
        -------
        value : string or None
            The attribute value.
            Returns None if attribute does not exist.
        """
        return self.__attr.get(key, None)

    def set_attr(self, **kwargs):
        """Set attributes to the Booster.

        Parameters
        ----------
        **kwargs
            The attributes to set.
            Setting a value to None deletes an attribute.

        Returns
        -------
        self : Booster
            Booster with set attributes.
        """
        for key, value in kwargs.items():
            if value is not None:
                if not isinstance(value, string_type):
                    raise ValueError("Only string values are accepted")
                self.__attr[key] = value
            else:
                self.__attr.pop(key, None)
        return self
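
    # Illustrative sketch (not part of the original source): attaching string
    # metadata to a Booster ``bst`` and deleting it again; the attribute name
    # is hypothetical.
    #
    #   bst.set_attr(data_version='v2')  # values must be strings
    #   bst.attr('data_version')         # 'v2'
    #   bst.set_attr(data_version=None)  # deletes the attribute
    #   bst.attr('data_version')         # None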