"examples/git@developer.sourcefind.cn:tianlh/lightgbm-dcu.git" did not exist on "631e0a2a7bdd694a91f30378fb271d05ce438122"
basic.py 58.8 KB
Newer Older
wxchan's avatar
wxchan committed
1
# coding: utf-8
# pylint: disable = invalid-name, C0111, C0301, R0912, R0913, R0914, W0105
# pylint: disable = E1101
"""Wrapper c_api of LightGBM"""
from __future__ import absolute_import

import sys
import ctypes
import tempfile
import json

import numpy as np
import scipy.sparse

from .libpath import find_lib_path

"""pandas"""
try:
    from pandas import Series, DataFrame
    IS_PANDAS_INSTALLED = True
except ImportError:
    class Series(object):
        pass
    class DataFrame(object):
        pass
    IS_PANDAS_INSTALLED = False

IS_PY3 = (sys.version_info[0] == 3)

def _load_lib():
    """Load LightGBM Library."""
    lib_path = find_lib_path()
    if len(lib_path) == 0:
        raise Exception("cannot find LightGBM library")
    lib = ctypes.cdll.LoadLibrary(lib_path[0])
    lib.LGBM_GetLastError.restype = ctypes.c_char_p
    return lib

_LIB = _load_lib()

class LightGBMError(Exception):
    """Error throwed by LightGBM"""
    pass

def _safe_call(ret):
    """Check the return value of a C API call.

    Parameters
    ----------
    ret : int
        Return value from API calls.
    """
    if ret != 0:
        raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
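
# Illustrative pattern (the specific call and "train.bin" path are
# assumptions, mirroring usage later in this file): every C API invocation
# is wrapped so that a non-zero return code raises LightGBMError with the
# library's last error message, e.g.
#
#   _safe_call(_LIB.LGBM_DatasetSaveBinary(handle, c_str("train.bin")))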

def is_str(s):
    """Check is a str or not"""
    if IS_PY3:
        return isinstance(s, str)
    else:
        return isinstance(s, basestring)

def is_numpy_object(data):
    """Check is numpy object"""
    return type(data).__module__ == np.__name__

def is_numpy_1d_array(data):
    """Check is 1d numpy array"""
    return isinstance(data, np.ndarray) and len(data.shape) == 1

def is_1d_list(data):
    """Check is 1d list"""
    return isinstance(data, list) and \
        (not data or isinstance(data[0], (int, float, bool)))

def list_to_1d_numpy(data, dtype):
    """convert to 1d numpy array"""
    if is_numpy_1d_array(data):
        if data.dtype == dtype:
            return data
        else:
            return data.astype(dtype=dtype, copy=False)
    elif is_1d_list(data):
        return np.array(data, dtype=dtype, copy=False)
    elif IS_PANDAS_INSTALLED and isinstance(data, Series):
        return data.astype(dtype).values
    else:
        raise TypeError("Unknow type({})".format(type(data).__name__))

def cfloat32_array_to_numpy(cptr, length):
    """Convert a ctypes float pointer array to a numpy array.
    """
    if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
        res = np.fromiter(cptr, dtype=np.float32, count=length)
        return res
    else:
        raise RuntimeError('expected float pointer')

def cint32_array_to_numpy(cptr, length):
    """Convert a ctypes float pointer array to a numpy array.
    """
    if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
        res = np.fromiter(cptr, dtype=np.int32, count=length)
        return res
    else:
        raise RuntimeError('expected int pointer')
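
# Example (illustrative): round-tripping a float32 buffer through ctypes.
#
#   arr = np.array([0.5, 1.5], dtype=np.float32)
#   ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
#   cfloat32_array_to_numpy(ptr, 2)   # -> array([0.5, 1.5], dtype=float32)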

def c_str(string):
    """Convert a python string to cstring."""
    return ctypes.c_char_p(string.encode('utf-8'))

def c_array(ctype, values):
    """Convert a python array to c array."""
    return (ctype * len(values))(*values)

def param_dict_to_str(data):
    """Convert a dict of parameters to the string format LightGBM expects."""
    if not data:
        return ""
    pairs = []
    for key, val in data.items():
        if is_str(val) or isinstance(val, (int, float, bool, np.integer, np.floating)):
            pairs.append(str(key)+'='+str(val))
        elif isinstance(val, (list, tuple, set)):
            pairs.append(str(key)+'='+','.join(map(str, val)))
        else:
            raise TypeError('unknown type of parameter: %s, got: %s'
                            % (key, type(val).__name__))
    return ' '.join(pairs)
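
# Example (illustrative): scalars are rendered as key=value and collections
# are comma-joined.
#
#   >>> param_dict_to_str({'num_leaves': 31})
#   'num_leaves=31'
#   >>> param_dict_to_str({'metric': ['l1', 'l2']})
#   'metric=l1,l2'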

"""marco definition of data type in c_api of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3

"""Matric is row major in python"""
C_API_IS_ROW_MAJOR = 1

"""marco definition of prediction type in c_api of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2

"""data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
                     "weight": C_API_DTYPE_FLOAT32,
                     "init_score": C_API_DTYPE_FLOAT32,
                     "group": C_API_DTYPE_INT32}

def c_float_array(data):
    """get pointer of float numpy array / list"""
    if is_1d_list(data):
        data = np.array(data, copy=False)
    if is_numpy_1d_array(data):
        if data.dtype == np.float32:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
            type_data = C_API_DTYPE_FLOAT32
        elif data.dtype == np.float64:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
            type_data = C_API_DTYPE_FLOAT64
        else:
            raise TypeError("expected np.float32 or np.float64, met type({})"
                            .format(data.dtype))
    else:
        raise TypeError("Unknow type({})".format(type(data).__name__))
    return (ptr_data, type_data)
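
# Example (illustrative): a plain Python list is converted first, and a
# float64 array maps to a double pointer.
#
#   ptr, type_code = c_float_array(np.array([1.0, 2.0]))   # dtype float64
#   assert type_code == C_API_DTYPE_FLOAT64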

def c_int_array(data):
    """get pointer of int numpy array / list"""
    if is_1d_list(data):
        data = np.array(data, copy=False)
    if is_numpy_1d_array(data):
        if data.dtype == np.int32:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
            type_data = C_API_DTYPE_INT32
        elif data.dtype == np.int64:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
            type_data = C_API_DTYPE_INT64
        else:
            raise TypeError("expected np.int32 or np.int64, met type({})"
                            .format(data.dtype))
    else:
        raise TypeError("Unknow type({})".format(type(data).__name__))
    return (ptr_data, type_data)

class _InnerPredictor(object):
    """
    A _InnerPredictor of LightGBM.
    Only used for prediction, typically for continued training.
    Note: can be converted from a Booster, but cannot be converted back to one.
    """
    def __init__(self, model_file=None, booster_handle=None):
        """Initialize the _InnerPredictor. Not expose to user

        Parameters
        ----------
        model_file : string
            Path to the model file.
        booster_handle : handle of Booster
            Use this handle to initialize the predictor
        """
        self.handle = ctypes.c_void_p()
        self.__is_manage_handle = True
        if model_file is not None:
            """Prediction task"""
            out_num_iterations = ctypes.c_int64(0)
            _safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
                c_str(model_file),
                ctypes.byref(out_num_iterations),
                ctypes.byref(self.handle)))
            out_num_class = ctypes.c_int64(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.num_class = out_num_class.value
            self.num_total_iteration = out_num_iterations.value
        elif booster_handle is not None:
            self.__is_manage_handle = False
            self.handle = booster_handle
            out_num_class = ctypes.c_int64(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.num_class = out_num_class.value
            out_num_iterations = ctypes.c_int64(0)
            _safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
                self.handle,
                ctypes.byref(out_num_iterations)))
            self.num_total_iteration = out_num_iterations.value
        else:
            raise TypeError('Need model file or booster handle to create a predictor')

    def __del__(self):
        if self.__is_manage_handle:
            _safe_call(_LIB.LGBM_BoosterFree(self.handle))

    def predict(self, data, num_iteration=-1,
                raw_score=False, pred_leaf=False, data_has_header=False,
                is_reshape=True):
        """
        Predict logic

        Parameters
        ----------
        data : string/numpy array/scipy.sparse
            Data source for prediction
            When data is a string, it represents the path of a txt file
        num_iteration : int
            Number of iterations used for prediction
        raw_score : bool
            True for predict raw score
        pred_leaf : bool
            True for predict leaf index
        data_has_header : bool
            Used for txt data, True if txt data has header
        is_reshape : bool
            Reshape to (nrow, ncol) if true

        Returns
        -------
        Prediction result
        """
        if isinstance(data, (_InnerDataset, Dataset)):
            raise TypeError("cannot use Dataset instance for prediction, please use raw data instead")
        predict_type = C_API_PREDICT_NORMAL
        if raw_score:
            predict_type = C_API_PREDICT_RAW_SCORE
        if pred_leaf:
            predict_type = C_API_PREDICT_LEAF_INDEX
        int_data_has_header = 1 if data_has_header else 0
        if num_iteration > self.num_total_iteration:
            num_iteration = self.num_total_iteration
        if is_str(data):
            tmp_pred_fname = tempfile.NamedTemporaryFile(prefix="lightgbm_tmp_pred_").name
            _safe_call(_LIB.LGBM_BoosterPredictForFile(
                self.handle,
                c_str(data),
                int_data_has_header,
                predict_type,
                num_iteration,
                c_str(tmp_pred_fname)))
            with open(tmp_pred_fname, "r") as tmp_file:
                lines = tmp_file.readlines()
                nrow = len(lines)
                preds = [float(token) for line in lines for token in line.split('\t')]
                preds = np.array(preds, dtype=np.float32, copy=False)
        elif isinstance(data, scipy.sparse.csr_matrix):
            preds, nrow = self.__pred_for_csr(data, num_iteration,
                                              predict_type)
        elif isinstance(data, np.ndarray):
            preds, nrow = self.__pred_for_np2d(data, num_iteration,
                                               predict_type)
        elif IS_PANDAS_INSTALLED and isinstance(data, DataFrame):
            preds, nrow = self.__pred_for_np2d(data.values, num_iteration,
                                               predict_type)
        else:
            try:
                csr = scipy.sparse.csr_matrix(data)
                preds, nrow = self.__pred_for_csr(csr, num_iteration,
                                                  predict_type)
            except BaseException:
                raise TypeError('can not predict data for type {}'.
                                format(type(data).__name__))
        if pred_leaf:
            preds = preds.astype(np.int32)
        if is_reshape and preds.size != nrow:
            if preds.size % nrow == 0:
                preds = preds.reshape(nrow, -1)
            else:
                raise ValueError('length of predict result (%d) cannot be divided by nrow (%d)'
                                 % (preds.size, nrow))
        return preds

    def __get_num_preds(self, num_iteration, nrow, predict_type):
        """
        Get size of prediction result
        """
        n_preds = self.num_class * nrow
        if predict_type == C_API_PREDICT_LEAF_INDEX:
            if num_iteration > 0:
                n_preds *= min(num_iteration, self.num_total_iteration)
            else:
                n_preds *= self.num_total_iteration
        return n_preds
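
    # Sizing sketch (illustrative): with num_class=3 and nrow=10, a normal
    # prediction holds 3 * 10 = 30 values, while a leaf-index prediction over
    # 100 iterations holds 3 * 10 * 100 = 3000 values.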

    def __pred_for_np2d(self, mat, num_iteration, predict_type):
        """
        Predict for a 2-D numpy matrix.
        """
        if len(mat.shape) != 2:
            raise ValueError('Input numpy.ndarray must be 2 dimensional')

        if mat.dtype == np.float32 or mat.dtype == np.float64:
            data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
        else:
            """change non-float data to float data, need to copy"""
            data = np.array(mat.reshape(mat.size), dtype=np.float32)
        ptr_data, type_ptr_data = c_float_array(data)
        n_preds = self.__get_num_preds(num_iteration, mat.shape[0],
                                       predict_type)
        preds = np.zeros(n_preds, dtype=np.float32)
        out_num_preds = ctypes.c_int64(0)
        _safe_call(_LIB.LGBM_BoosterPredictForMat(
            self.handle,
            ptr_data,
            type_ptr_data,
            mat.shape[0],
            mat.shape[1],
            C_API_IS_ROW_MAJOR,
            predict_type,
            num_iteration,
            ctypes.byref(out_num_preds),
            preds.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
            ))
        if n_preds != out_num_preds.value:
            raise ValueError("incorrect number for predict result")
        return preds, mat.shape[0]

    def __pred_for_csr(self, csr, num_iteration, predict_type):
        """
        Predict for a csr data
        """
        nrow = len(csr.indptr) - 1
        n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
        preds = np.zeros(n_preds, dtype=np.float32)
        out_num_preds = ctypes.c_int64(0)

        ptr_indptr, type_ptr_indptr = c_int_array(csr.indptr)
        ptr_data, type_ptr_data = c_float_array(csr.data)

        _safe_call(_LIB.LGBM_BoosterPredictForCSR(
            self.handle,
            ptr_indptr,
            type_ptr_indptr,
            csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
            ptr_data,
            type_ptr_data,
            len(csr.indptr),
            len(csr.data),
            csr.shape[1],
            predict_type,
            num_iteration,
            ctypes.byref(out_num_preds),
            preds.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
            ))
        if n_preds != out_num_preds.value:
            raise ValueError("incorrect number for predict result")
        return preds, nrow
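
# Minimal usage sketch (illustrative; "model.txt" and `mat` are assumptions):
#
#   predictor = _InnerPredictor(model_file="model.txt")
#   preds = predictor.predict(mat)   # mat: 2-D numpy array of features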

PANDAS_DTYPE_MAPPER = {'int8': 'int', 'int16': 'int', 'int32': 'int',
                       'int64': 'int', 'uint8': 'int', 'uint16': 'int',
                       'uint32': 'int', 'uint64': 'int', 'float16': 'float',
                       'float32': 'float', 'float64': 'float', 'bool': 'int'}

def _data_from_pandas(data):
    if isinstance(data, DataFrame):
        data_dtypes = data.dtypes
        if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in data_dtypes):
            bad_fields = [data.columns[i] for i, dtype in
                          enumerate(data_dtypes) if dtype.name not in PANDAS_DTYPE_MAPPER]

            msg = """DataFrame.dtypes for data must be int, float or bool. Did not expect the data types in fields """
            raise ValueError(msg + ', '.join(bad_fields))
        data = data.values.astype('float')
    return data

def _label_from_pandas(label):
    if isinstance(label, DataFrame):
        if len(label.columns) > 1:
            raise ValueError('DataFrame for label cannot have multiple columns')
        label_dtypes = label.dtypes
        if not all(dtype.name in PANDAS_DTYPE_MAPPER for dtype in label_dtypes):
            raise ValueError('DataFrame.dtypes for label must be int, float or bool')
        label = label.values.astype('float')
    return label

class _InnerDataset(object):
    """_InnerDataset used in LightGBM.
    _InnerDataset is an internal data structure used by LightGBM.
    This class is not exposed; please use Dataset instead.
    """

    def __init__(self, data, label=None, max_bin=255, reference=None,
                 weight=None, group=None, predictor=None,
                 silent=False, feature_name=None,
                 categorical_feature=None, params=None):
        """
        _InnerDataset used in LightGBM.

        Parameters
        ----------
        data : string/numpy array/scipy.sparse
            Data source of _InnerDataset.
            When data is a string, it represents the path of a txt file
        label : list or numpy 1-D array, optional
            Label of the data
        max_bin : int, required
            Max number of discrete bins for features
        reference : Other _InnerDataset, optional
            If this is a dataset for validation, the training data should be used as reference
        weight : list or numpy 1-D array , optional
            Weight for each instance.
        group : list or numpy 1-D array , optional
            Group/query size for dataset
        predictor : _InnerPredictor
            Used for continued training
        silent : boolean, optional
            Whether print messages during construction
        feature_name : list of str
            Feature names
        categorical_feature : list of str or int
            Categorical features, type int represents index, \
            type str represents feature names (need to specify feature_name as well)
        params: dict, optional
            Other parameters
        """
        if data is None:
            self.handle = None
            return
        data = _data_from_pandas(data)
        label = _label_from_pandas(label)
        self.data_has_header = False
        """process for args"""
        params = {} if params is None else params
        self.max_bin = max_bin
        self.predictor = predictor
        params["max_bin"] = max_bin
        if silent:
            params["verbose"] = 0
        elif "verbose" not in params:
            params["verbose"] = 1
        """get categorical features"""
        if categorical_feature is not None:
            categorical_indices = set()
            feature_dict = {}
            if feature_name is not None:
                feature_dict = {name: i for i, name in enumerate(feature_name)}
            for name in categorical_feature:
                if is_str(name) and name in feature_dict:
                    categorical_indices.add(feature_dict[name])
                elif isinstance(name, int):
                    categorical_indices.add(name)
                else:
                    raise TypeError("unknown type({}) or unknown name({}) in categorical_feature" \
                        .format(type(name).__name__, name))

            params['categorical_column'] = categorical_indices

        params_str = param_dict_to_str(params)
        """process for reference dataset"""
        ref_dataset = None
        if isinstance(reference, _InnerDataset):
            ref_dataset = ctypes.byref(reference.handle)
        elif reference is not None:
            raise TypeError('Reference dataset should be None or dataset instance')
        """start construct data"""
        if is_str(data):
            """check data has header or not"""
            if params.get("has_header", "").lower() == "true" \
                or params.get("header", "").lower() == "true":
                self.data_has_header = True
            self.handle = ctypes.c_void_p()
            _safe_call(_LIB.LGBM_DatasetCreateFromFile(
                c_str(data),
                c_str(params_str),
                ref_dataset,
                ctypes.byref(self.handle)))
        elif isinstance(data, scipy.sparse.csr_matrix):
            self.__init_from_csr(data, params_str, ref_dataset)
        elif isinstance(data, np.ndarray):
            self.__init_from_np2d(data, params_str, ref_dataset)
        else:
            try:
                csr = scipy.sparse.csr_matrix(data)
                self.__init_from_csr(csr, params_str, ref_dataset)
            except BaseException:
                raise TypeError('can not initialize _InnerDataset from {}'.format(type(data).__name__))
        if label is not None:
            self.set_label(label)
        if self.get_label() is None:
            raise ValueError("label should not be None")
        if weight is not None:
            self.set_weight(weight)
        if group is not None:
            self.set_group(group)
        # load init score
        if isinstance(self.predictor, _InnerPredictor):
            init_score = self.predictor.predict(data,
                                                raw_score=True,
                                                data_has_header=self.data_has_header,
                                                is_reshape=False)
            if self.predictor.num_class > 1:
                # need to regroup the init score
                new_init_score = np.zeros(init_score.size, dtype=np.float32)
                num_data = self.num_data()
                for i in range(num_data):
                    for j in range(self.predictor.num_class):
                        new_init_score[j * num_data + i] = init_score[i * self.predictor.num_class + j]
                init_score = new_init_score
            self.set_init_score(init_score)
        elif self.predictor is not None:
            raise TypeError('wrong predictor type {}'.format(type(self.predictor).__name__))
        # set feature names
        self.set_feature_name(feature_name)

    def create_valid(self, data, label=None, weight=None, group=None,
                     silent=False, params=None):
        """
        Create validation data align with current dataset

        Parameters
        ----------
        data : string/numpy array/scipy.sparse
            Data source of _InnerDataset.
            When data is a string, it represents the path of a txt file
        label : list or numpy 1-D array, optional
            Label of the training data.
        weight : list or numpy 1-D array , optional
            Weight for each instance.
        group : list or numpy 1-D array , optional
            Group/query size for dataset
        silent : boolean, optional
            Whether print messages during construction
        params: dict, optional
            Other parameters
        """
        return _InnerDataset(data, label=label, max_bin=self.max_bin, reference=self,
                             weight=weight, group=group, predictor=self.predictor,
                             silent=silent, params=params)

    def subset(self, used_indices, params=None):
        """
        Get subset of current dataset
        """
        used_indices = list_to_1d_numpy(used_indices, np.int32)
        ret = _InnerDataset(None)
        ret.handle = ctypes.c_void_p()
        params_str = param_dict_to_str(params)
        _safe_call(_LIB.LGBM_DatasetGetSubset(
            ctypes.byref(self.handle),
            used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
            used_indices.shape[0],
            c_str(params_str),
            ctypes.byref(ret.handle)))
        ret.max_bin = self.max_bin
        ret.predictor = self.predictor
        if ret.get_label() is None:
            raise ValueError("label should not be None")
        return ret

    def set_feature_name(self, feature_name):
        """
        set feature names
        """
        if feature_name is None:
            return
        if len(feature_name) != self.num_feature():
            raise ValueError("size of feature_name error")
        c_feature_name = [c_str(name) for name in feature_name]
        _safe_call(_LIB.LGBM_DatasetSetFeatureNames(
            self.handle,
            c_array(ctypes.c_char_p, c_feature_name),
            len(feature_name)))

    def __init_from_np2d(self, mat, params_str, ref_dataset):
        """
        Initialize data from a 2-D numpy matrix.
        """
        if len(mat.shape) != 2:
            raise ValueError('Input numpy.ndarray must be 2 dimensional')

        self.handle = ctypes.c_void_p()
        if mat.dtype == np.float32 or mat.dtype == np.float64:
            data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
        else:
            """change non-float data to float data, need to copy"""
            data = np.array(mat.reshape(mat.size), dtype=np.float32)

        ptr_data, type_ptr_data = c_float_array(data)
        _safe_call(_LIB.LGBM_DatasetCreateFromMat(
            ptr_data,
            type_ptr_data,
            mat.shape[0],
            mat.shape[1],
            C_API_IS_ROW_MAJOR,
            c_str(params_str),
            ref_dataset,
            ctypes.byref(self.handle)))

    def __init_from_csr(self, csr, params_str, ref_dataset):
        """
        Initialize data from a CSR matrix.
        """
        if len(csr.indices) != len(csr.data):
            raise ValueError('length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
        self.handle = ctypes.c_void_p()

        ptr_indptr, type_ptr_indptr = c_int_array(csr.indptr)
        ptr_data, type_ptr_data = c_float_array(csr.data)

        _safe_call(_LIB.LGBM_DatasetCreateFromCSR(
            ptr_indptr,
            type_ptr_indptr,
            csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
            ptr_data,
            type_ptr_data,
            len(csr.indptr),
            len(csr.data),
            csr.shape[1],
            c_str(params_str),
            ref_dataset,
            ctypes.byref(self.handle)))

    def __del__(self):
        _safe_call(_LIB.LGBM_DatasetFree(self.handle))

    def get_field(self, field_name):
        """Get property from the _InnerDataset.

        Parameters
        ----------
        field_name: str
            The field name of the information

        Returns
        -------
        info : array
            A numpy array of information of the data
        """
        tmp_out_len = ctypes.c_int64()
        out_type = ctypes.c_int32()
        ret = ctypes.POINTER(ctypes.c_void_p)()
        _safe_call(_LIB.LGBM_DatasetGetField(
            self.handle,
            c_str(field_name),
            ctypes.byref(tmp_out_len),
            ctypes.byref(ret),
            ctypes.byref(out_type)))
        if out_type.value != FIELD_TYPE_MAPPER[field_name]:
            raise TypeError("Return type error for get_field")
        if tmp_out_len.value == 0:
            return None
        if out_type.value == C_API_DTYPE_INT32:
            return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
        elif out_type.value == C_API_DTYPE_FLOAT32:
            return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
        else:
            raise TypeError("unknow type")

    def set_field(self, field_name, data):
        """Set property into the _InnerDataset.

        Parameters
        ----------
        field_name: str
            The field name of the information

        data: numpy array or list or None
            The array of data to be set
        """
        if data is None:
            """set to None"""
            _safe_call(_LIB.LGBM_DatasetSetField(
                self.handle,
                c_str(field_name),
                None,
                0,
                FIELD_TYPE_MAPPER[field_name]))
            return
        if IS_PANDAS_INSTALLED and isinstance(data, Series):
            dtype = np.int32 if field_name == 'group' else np.float32
            data = data.astype(dtype).values
        if not is_numpy_1d_array(data):
            raise TypeError("Unknow type({})".format(type(data).__name__))
        if data.dtype == np.float32:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
            type_data = C_API_DTYPE_FLOAT32
        elif data.dtype == np.int32:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
            type_data = C_API_DTYPE_INT32
        else:
            raise TypeError("excepted np.float32 or np.int32, met type({})".format(data.dtype))
        if type_data != FIELD_TYPE_MAPPER[field_name]:
            raise TypeError("type error for set_field")
        _safe_call(_LIB.LGBM_DatasetSetField(
            self.handle,
            c_str(field_name),
            ptr_data,
            len(data),
            type_data))
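
    # Illustrative: "group" must be int32 and all other fields float32, per
    # FIELD_TYPE_MAPPER. For example:
    #
    #   inner_dataset.set_field('group', np.array([10, 20], dtype=np.int32))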

    def save_binary(self, filename):
        """Save _InnerDataset to binary file

        Parameters
        ----------
        filename : string
            Name of the output file.
        """
        _safe_call(_LIB.LGBM_DatasetSaveBinary(
            self.handle,
            c_str(filename)))

    def set_label(self, label):
        """Set label of _InnerDataset

        Parameters
        ----------
        label: numpy array or list or None
            The label information to be set into _InnerDataset
        """
        label = list_to_1d_numpy(label, np.float32)
        self.set_field('label', label)

    def set_weight(self, weight):
        """ Set weight of each instance.

        Parameters
        ----------
        weight : numpy array or list or None
            Weight for each data point
        """
        if weight is not None:
            weight = list_to_1d_numpy(weight, np.float32)
        self.set_field('weight', weight)

    def set_init_score(self, score):
        """ Set init score of booster to start from.

        Parameters
        ----------
        score: numpy array or list or None
            Init score for booster
        """
        if score is not None:
            score = list_to_1d_numpy(score, np.float32)
        self.set_field('init_score', score)

    def set_group(self, group):
        """Set group size of _InnerDataset (used for ranking).

        Parameters
        ----------
        group : numpy array or list or None
            Group size of each group
        """
        if group is not None:
            group = list_to_1d_numpy(group, np.int32)
        self.set_field('group', group)

    def get_label(self):
        """Get the label of the _InnerDataset.

        Returns
        -------
        label : array
        """
        return self.get_field('label')

    def get_weight(self):
        """Get the weight of the _InnerDataset.

        Returns
        -------
        weight : array
        """
        return self.get_field('weight')

    def get_init_score(self):
        """Get the initial score of the _InnerDataset.

        Returns
        -------
        init_score : array
        """
        return self.get_field('init_score')

    def get_group(self):
        """Get the initial score of the _InnerDataset.

        Returns
        -------
        group : array
        """
        return self.get_field('group')

    def num_data(self):
        """Get the number of rows in the _InnerDataset.

        Returns
        -------
        number of rows : int
        """
        ret = ctypes.c_int64()
        _safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
                                               ctypes.byref(ret)))
        return ret.value

    def num_feature(self):
        """Get the number of columns (features) in the _InnerDataset.

        Returns
        -------
        number of columns : int
        """
        ret = ctypes.c_int64()
        _safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
                                                  ctypes.byref(ret)))
        return ret.value

class Dataset(object):
    """High level Dataset used in LightGBM.
    """
    def __init__(self, data, label=None, max_bin=255, reference=None,
                 weight=None, group=None, silent=False,
                 feature_name=None, categorical_feature=None, params=None,
                 free_raw_data=True):
        """
        Parameters
        ----------
        data : string/numpy array/scipy.sparse
            Data source of Dataset.
            When data is a string, it represents the path of a txt file
        label : list or numpy 1-D array, optional
            Label of the data
        max_bin : int, required
            Max number of discrete bins for features
        reference : Other Dataset, optional
            If this is a dataset for validation, the training data should be used as reference
        weight : list or numpy 1-D array , optional
            Weight for each instance.
        group : list or numpy 1-D array , optional
            Group/query size for dataset
        silent : boolean, optional
            Whether print messages during construction
        feature_name : list of str
            Feature names
        categorical_feature : list of str or int
            Categorical features, type int represents index, \
            type str represents feature names (need to specify feature_name as well)
        params: dict, optional
            Other parameters
        free_raw_data: bool
            Whether to free the raw data after constructing the inner dataset
        """
        self.data = data
        self.label = label
        self.max_bin = max_bin
        self.reference = reference
        self.weight = weight
        self.group = group
        self.silent = silent
        self.feature_name = feature_name
        self.categorical_feature = categorical_feature
        self.params = params
        self.free_raw_data = free_raw_data
        self.inner_dataset = None
        self.used_indices = None
        self._predictor = None
        self.init_score = None

    def create_valid(self, data, label=None, weight=None, group=None,
                     silent=False, params=None):
        """
        Create validation data align with current dataset

        Parameters
        ----------
        data : string/numpy array/scipy.sparse
            Data source of Dataset.
            When data is a string, it represents the path of a txt file
        label : list or numpy 1-D array, optional
            Label of the training data.
        weight : list or numpy 1-D array , optional
            Weight for each instance.
        group : list or numpy 1-D array , optional
            Group/query size for dataset
        silent : boolean, optional
            Whether print messages during construction
        params: dict, optional
            Other parameters
        """
        ret = Dataset(data, label=label, max_bin=self.max_bin, reference=self,
                      weight=weight, group=group,
                      silent=silent, params=params, free_raw_data=self.free_raw_data)
        ret._set_predictor(self._predictor)
        return ret

    def construct(self):
        """Lazy init"""
        if self.inner_dataset is None:
            if self.reference is not None:
                if self.used_indices is None:
                    self.inner_dataset = self.reference._get_inner_dataset().create_valid(
                        self.data, self.label,
                        self.weight, self.group,
                        self.silent, self.params)
                else:
                    """construct subset"""
                    self.inner_dataset = self.reference._get_inner_dataset().subset(
                        self.used_indices, self.params)
            else:
                self.inner_dataset = _InnerDataset(self.data, self.label, self.max_bin,
                    None, self.weight, self.group, self._predictor,
                    self.silent, self.feature_name, self.categorical_feature, self.params)
            if self.free_raw_data:
                self.data = None
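
    # Illustrative flow (usage is an assumption): construction is lazy, so no
    # native memory is allocated until the data is first needed.
    #
    #   ds = Dataset(data, label)   # stores raw data only
    #   ds.construct()              # builds the _InnerDataset now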

    def _get_inner_dataset(self):
        """get inner dataset"""
        self.construct()
        return self.inner_dataset

    def __is_constructed(self):
        """check inner_dataset is constructed or not"""
        return self.inner_dataset is not None

    def set_categorical_feature(self, categorical_feature):
        """
        Set categorical features

        Parameters
        ----------
        categorical_feature : list of int or str
            Name/index of categorical features

        """
        if self.categorical_feature == categorical_feature:
            return
        if self.data is not None:
            self.categorical_feature = categorical_feature
            self.inner_dataset = None
        else:
            raise LightGBMError("Cannot set categorical feature after freed raw data,\
             Set free_raw_data=False when construct Dataset to avoid this.")

    def _set_predictor(self, predictor):
        """
        Set predictor for continued training; users are not recommended to call this function.
        Please set init_model in engine.train or engine.cv
        """
        if predictor is self._predictor:
            return
        if self.data is not None:
            self._predictor = predictor
            self.inner_dataset = None
        else:
            raise LightGBMError("Cannot set predictor after freed raw data,\
             Set free_raw_data=False when construct Dataset to avoid this.")

    def set_reference(self, reference):
        """
        Set reference dataset

        Parameters
        ----------
        reference : Dataset
            Will use reference as a template to construct the current dataset
        """
        self.set_categorical_feature(reference.categorical_feature)
        self.set_feature_name(reference.feature_name)
        self._set_predictor(reference._predictor)
        if self.reference is reference:
            return
        if self.data is not None:
            self.reference = reference
            self.inner_dataset = None
        else:
            raise LightGBMError("Cannot set reference after freed raw data,\
             Set free_raw_data=False when construct Dataset to avoid this.")

    def set_feature_name(self, feature_name):
        """
        Set feature name

        Parameters
        ----------
        feature_name : list of str
            feature names
        """
        self.feature_name = feature_name
        if self.__is_constructed():
            self.inner_dataset.set_feature_name(self.feature_name)

    def subset(self, used_indices, params=None):
        """
        Get subset of current dataset

        Parameters
        ----------
        used_indices : list of int
            Indices used to create the subset
        params : dict
            other parameters
        """
        ret = Dataset(None)
        ret.feature_name = self.feature_name
        ret.categorical_feature = self.categorical_feature
        ret.reference = self
        ret._predictor = self._predictor
        ret.used_indices = used_indices
        ret.params = params
        return ret
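
    # Illustrative: a subset keeps this dataset as its reference and is also
    # built lazily.
    #
    #   fold = ds.subset([0, 2, 4])   # rows 0, 2 and 4 of a hypothetical ds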

    def save_binary(self, filename):
        """Save Dataset to binary file

        Parameters
        ----------
        filename : string
            Name of the output file.
        """
        self._get_inner_dataset().save_binary(filename)


    def set_label(self, label):
        """Set label of Dataset

        Parameters
        ----------
        label: numpy array or list or None
            The label information to be set into Dataset
        """
        self.label = label
        if self.__is_constructed():
            self.inner_dataset.set_label(self.label)

    def set_weight(self, weight):
        """ Set weight of each instance.

        Parameters
        ----------
        weight : numpy array or list or None
            Weight for each data point
        """
        self.weight = weight
        if self.__is_constructed():
            self.inner_dataset.set_weight(self.weight)

    def set_init_score(self, init_score):
        """ Set init score of booster to start from.

        Parameters
        ----------
        init_score: numpy array or list or None
            Init score for booster
        """
        self.init_score = init_score
        if self.__is_constructed():
            self.inner_dataset.set_init_score(self.init_score)

    def set_group(self, group):
        """Set group size of Dataset (used for ranking).

        Parameters
        ----------
        group : numpy array or list or None
            Group size of each group
        """
        self.group = group
        if self.__is_constructed():
            self.inner_dataset.set_group(self.group)

    def get_label(self):
        """Get the label of the Dataset.

        Returns
        -------
        label : array
        """
        if self.label is None and self.__is_constructed():
            self.label = self.inner_dataset.get_label()
        return self.label

    def get_weight(self):
        """Get the weight of the Dataset.

        Returns
        -------
        weight : array
        """
        if self.weight is None and self.__is_constructed():
            self.weight = self.inner_dataset.get_weight()
        return self.weight

    def get_init_score(self):
        """Get the initial score of the Dataset.

        Returns
        -------
        init_score : array
        """
        if self.init_score is None and self.__is_constructed():
            self.init_score = self.inner_dataset.get_init_score()
        return self.init_score

    def get_group(self):
        """Get the initial score of the Dataset.

        Returns
        -------
        group : array
        """
        if self.group is None and self.__is_constructed():
            self.group = self.inner_dataset.get_group()
        return self.group

    def num_data(self):
        """Get the number of rows in the Dataset.

        Returns
        -------
        number of rows : int
        """
        if self.__is_constructed():
            return self.inner_dataset.num_data()
        else:
            raise LightGBMError("Cannot call num_data before construct, please call it explicitly")

    def num_feature(self):
        """Get the number of columns (features) in the Dataset.

        Returns
        -------
        number of columns : int
        """
        if self.__is_constructed():
            return self.inner_dataset.num_feature()
        else:
            raise LightGBMError("Cannot call num_feature before construct, please call it explicitly")

class Booster(object):
    """A Booster of LightGBM.
    """
    def __init__(self, params=None, train_set=None, model_file=None, silent=False):
        """Initialize the Booster.

        Parameters
        ----------
        params : dict
            Parameters for boosters.
        train_set : Dataset
            Training dataset
        model_file : string
            Path to the model file.
        silent : boolean, optional
            Whether print messages during construction
        """
        self.handle = ctypes.c_void_p()
        self.__need_reload_eval_info = True
        self.__train_data_name = "training"
        self.__attr = {}
        self.best_iteration = -1
        params = {} if params is None else params
        if silent:
            params["verbose"] = 0
        elif "verbose" not in params:
            params["verbose"] = 1
        if train_set is not None:
            """Training task"""
            if not isinstance(train_set, Dataset):
                raise TypeError('training data should be a Dataset instance, got {}'.format(type(train_set).__name__))
            params_str = param_dict_to_str(params)
            """construct booster object"""
            _safe_call(_LIB.LGBM_BoosterCreate(
                train_set._get_inner_dataset().handle,
                c_str(params_str),
                ctypes.byref(self.handle)))
            """save reference to data"""
            self.train_set = train_set
            self.valid_sets = []
            self.name_valid_sets = []
            self.__num_dataset = 1
            self.__init_predictor = train_set._predictor
            if self.__init_predictor is not None:
                _safe_call(_LIB.LGBM_BoosterMerge(
                    self.handle,
                    self.__init_predictor.handle))
            out_num_class = ctypes.c_int64(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.__num_class = out_num_class.value
            """buffer for inner predict"""
            self.__inner_predict_buffer = [None]
            self.__is_predicted_cur_iter = [False]
            self.__get_eval_info()
        elif model_file is not None:
            """Prediction task"""
            out_num_iterations = ctypes.c_int64(0)
            _safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
                c_str(model_file),
                ctypes.byref(out_num_iterations),
                ctypes.byref(self.handle)))
            out_num_class = ctypes.c_int64(0)
            _safe_call(_LIB.LGBM_BoosterGetNumClasses(
                self.handle,
                ctypes.byref(out_num_class)))
            self.__num_class = out_num_class.value
        else:
            raise TypeError('Need at least a training dataset or a model file to create a booster instance')

    def __del__(self):
        if self.handle is not None:
            _safe_call(_LIB.LGBM_BoosterFree(self.handle))

    def set_train_data_name(self, name):
        self.__train_data_name = name

    def add_valid(self, data, name):
        """Add an validation data

        Parameters
        ----------
        data : Dataset
            Validation data
        name : string
            Name of validation data
        """
        if data._predictor is not self.__init_predictor:
            raise LightGBMError("Add validation data failed, you should use same predictor for these data")
        _safe_call(_LIB.LGBM_BoosterAddValidData(
            self.handle,
            data._get_inner_dataset().handle))
        self.valid_sets.append(data)
        self.name_valid_sets.append(name)
        self.__num_dataset += 1
        self.__inner_predict_buffer.append(None)
        self.__is_predicted_cur_iter.append(False)

    def reset_parameter(self, params):
        """Reset parameters for booster

        Parameters
        ----------
        params : dict
            New parameters for boosters
        """
        if 'metric' in params:
            self.__need_reload_eval_info = True
        params_str = param_dict_to_str(params)
        if params_str:
            _safe_call(_LIB.LGBM_BoosterResetParameter(
                self.handle,
                c_str(params_str)))

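    # Usage sketch (assumes `bst` is an existing Booster). Note that including
    # 'metric' in the new parameters triggers a reload of the evaluation info:
    #
    #     bst.reset_parameter({'learning_rate': 0.05, 'metric': 'l2'})
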
    def update(self, train_set=None, fobj=None):
        """
        Update for one iteration
        Note: for multi-class task, the score is grouped by class_id first, then by row_id.
              To get the i-th row score in the j-th class, access score[j * num_data + i];
              grad and hess should be grouped in the same way.

        Parameters
        ----------
        train_set : Dataset or None
            Training data; None means use the last training data
        fobj : function
            Customized objective function.

        Returns
        -------
        is_finished : bool
        """

        """need reset training data"""
        if train_set is not None and train_set is not self.train_set:
            if train_set._predictor is not self.__init_predictor:
                raise LightGBMError("Replace training data failed, you should use same predictor for these data")
            self.train_set = train_set
            _safe_call(_LIB.LGBM_BoosterResetTrainingData(
                self.handle,
                self.train_set._get_inner_dataset().handle))
            self.__inner_predict_buffer[0] = None
        is_finished = ctypes.c_int(0)
        if fobj is None:
            _safe_call(_LIB.LGBM_BoosterUpdateOneIter(
                self.handle,
                ctypes.byref(is_finished)))
            self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
            return is_finished.value == 1
        else:
            grad, hess = fobj(self.__inner_predict(0), self.train_set)
            return self.__boost(grad, hess)

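    # Sketch of a custom objective passed as `fobj` (hypothetical helper, shown
    # for a binary task where preds is a 1d score array aligned with the data;
    # assumes the Dataset exposes get_label()):
    #
    #     import numpy as np
    #
    #     def logistic_obj(preds, train_data):
    #         labels = train_data.get_label()
    #         prob = 1.0 / (1.0 + np.exp(-preds))
    #         return prob - labels, prob * (1.0 - prob)   # grad, hess
    #
    #     bst.update(fobj=logistic_obj)
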
    def __boost(self, grad, hess):
        """
        Boost the booster for one iteration, with customized gradient statistics.
        Note: for multi-class task, the score is grouped by class_id first, then by row_id.
              To get the i-th row score in the j-th class, access score[j * num_data + i];
              grad and hess should be grouped in the same way.

        Parameters
        ----------
        grad : 1d numpy array or 1d list
            The first order derivative (gradient).
        hess : 1d numpy array or 1d list
            The second order derivative (hessian).

        Returns
        -------
        is_finished : bool
        """
        if not is_numpy_1d_array(grad):
            if is_1d_list(grad):
                grad = np.array(grad, dtype=np.float32, copy=False)
            else:
                raise TypeError("grad should be numpy 1d array or 1d list")
        if not is_numpy_1d_array(hess):
            if is_1d_list(hess):
                hess = np.array(hess, dtype=np.float32, copy=False)
            else:
                raise TypeError("hess should be numpy 1d array or 1d list")
        if len(grad) != len(hess):
            raise ValueError('grad / hess lengths mismatch: {} / {}'.format(len(grad), len(hess)))
        if grad.dtype != np.float32:
            grad = grad.astype(np.float32, copy=False)
        if hess.dtype != np.float32:
            hess = hess.astype(np.float32, copy=False)
        is_finished = ctypes.c_int(0)
        _safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
            self.handle,
            grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
            hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
            ctypes.byref(is_finished)))
        self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
        return is_finished.value == 1

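    # Index sketch for the layout described above: with num_class classes and
    # num_data rows, grad/hess are flat arrays of length num_class * num_data:
    #
    #     idx = j * num_data + i   # entry of row i for class j
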
    def rollback_one_iter(self):
        """
        Rollback one iteration
        """
        _safe_call(_LIB.LGBM_BoosterRollbackOneIter(
            self.handle))
        self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]

    def current_iteration(self):
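        """Get the index of the current boosting iteration"""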
        out_cur_iter = ctypes.c_int64(0)
        _safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
            self.handle,
            ctypes.byref(out_cur_iter)))
        return out_cur_iter.value

    def eval(self, data, name, feval=None):
        """Evaluate for data

        Parameters
        ----------
        data : _InnerDataset object
            Data to evaluate
        name : string
            Name of the data
        feval : function
            Custom evaluation function.

        Returns
        -------
        result : list
            Evaluation result list.
        """
        if not isinstance(data, _InnerDataset):
            raise TypeError("Can only eval for _InnerDataset instance")
        data_idx = -1
        if data is self.train_set:
            data_idx = 0
        else:
            for i in range(len(self.valid_sets)):
                if data is self.valid_sets[i]:
                    data_idx = i + 1
                    break
        """need to push new valid data"""
        if data_idx == -1:
            self.add_valid(data, name)
            data_idx = self.__num_dataset - 1

        return self.__inner_eval(name, data_idx, feval)

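    # Each entry of the returned evaluation list is a tuple
    # (data_name, eval_name, value, is_higher_better), e.g. (illustrative):
    #
    #     [('eval', 'auc', 0.93, True)]
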
    def eval_train(self, feval=None):
        """Evaluate for training data

        Parameters
        ----------
        feval : function
            Custom evaluation function.

        Returns
        -------
        result : list
            Evaluation result list.
        """
        return self.__inner_eval(self.__train_data_name, 0, feval)

    def eval_valid(self, feval=None):
        """Evaluate for validation data

        Parameters
        ----------
        feval : function
            Custom evaluation function.

        Returns
        -------
        result : list
            Evaluation result list.
        """
        return [item for i in range(1, self.__num_dataset) \
            for item in self.__inner_eval(self.name_valid_sets[i-1], i, feval)]

    def save_model(self, filename, num_iteration=-1):
        """Save model of booster to file

        Parameters
        ----------
        filename : str
            Filename to save
        num_iteration : int
            Number of iterations to save; < 0 means save all
        """
        _safe_call(_LIB.LGBM_BoosterSaveModel(
            self.handle,
            num_iteration,
            c_str(filename)))

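    # Usage sketch; a saved file can be loaded back through the model_file
    # argument of the Booster constructor:
    #
    #     bst.save_model('model.txt')                       # save all iterations
    #     bst.save_model('model_10.txt', num_iteration=10)  # save 10 iterations
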
    def dump_model(self):
        """Dump model to json format

        Returns
        -------
        JSON dict of the model
        """
        buffer_len = 1 << 20
        tmp_out_len = ctypes.c_int64(0)
        string_buffer = ctypes.create_string_buffer(buffer_len)
        ptr_string_buffer = ctypes.c_char_p(ctypes.addressof(string_buffer))
        _safe_call(_LIB.LGBM_BoosterDumpModel(
            self.handle,
            buffer_len,
            ctypes.byref(tmp_out_len),
            ctypes.byref(ptr_string_buffer)))
        actual_len = tmp_out_len.value
        """if buffer length is not long enough, reallocate a buffer"""
        if actual_len > buffer_len:
            string_buffer = ctypes.create_string_buffer(actual_len)
            ptr_string_buffer = ctypes.c_char_p(ctypes.addressof(string_buffer))
            _safe_call(_LIB.LGBM_BoosterDumpModel(
                self.handle,
                actual_len,
                ctypes.byref(tmp_out_len),
                ctypes.byref(ptr_string_buffer)))
        return json.loads(string_buffer.value.decode())

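    # Usage sketch; the keys below are the ones feature_importance() relies on:
    #
    #     model = bst.dump_model()
    #     model['max_feature_idx']    # highest feature index used
    #     len(model['tree_info'])     # number of trees
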
    def predict(self, data, num_iteration=-1, raw_score=False, pred_leaf=False, data_has_header=False, is_reshape=True):
        """Predict logic

        Parameters
        ----------
        data : string/numpy array/scipy.sparse
            Data source for prediction
            When data type is string, it represents the path of a text file
        num_iteration : int
            Iteration used for prediction; < 0 means use all iterations
        raw_score : bool
            Whether to predict raw scores
        pred_leaf : bool
            Whether to predict leaf indices
        data_has_header : bool
            Whether the text data has a header; used only for text input
        is_reshape : bool
            Whether to reshape the result to (nrow, ncol)

        Returns
        -------
        Prediction result
        """
        predictor = _InnerPredictor(booster_handle=self.handle)
        return predictor.predict(data, num_iteration, raw_score, pred_leaf, data_has_header, is_reshape)

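    # Usage sketch (assumes `X` is a 2d numpy array with the same features the
    # model was trained on):
    #
    #     preds = bst.predict(X)                    # scores, reshaped to (nrow, ncol)
    #     raw = bst.predict(X, raw_score=True)      # raw scores
    #     leaves = bst.predict(X, pred_leaf=True)   # leaf indices
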
    def _to_predictor(self):
        """Convert to predictor
        """
        predictor = _InnerPredictor(booster_handle=self.handle)
        return predictor

    def feature_importance(self, importance_type='split'):
        """Feature importances

        Returns
        -------
        Array of feature importances
        """
        if importance_type not in ["split", "gain"]:
            raise KeyError("importance_type must be split or gain")
        dump_model = self.dump_model()
        ret = [0] * (dump_model["max_feature_idx"] + 1)
        def dfs(root):
            if "split_feature" in root:
                if importance_type == 'split':
                    ret[root["split_feature"]] += 1
                elif importance_type == 'gain':
                    ret[root["split_feature"]] += root["split_gain"]
                dfs(root["left_child"])
                dfs(root["right_child"])
        for tree in dump_model["tree_info"]:
            dfs(tree["tree_structure"])
        return np.array(ret)

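    # Usage sketch:
    #
    #     split_counts = bst.feature_importance()                      # 'split'
    #     total_gains = bst.feature_importance(importance_type='gain')
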
    def __inner_eval(self, data_name, data_idx, feval=None):
        """
        Evaluate training or validation data
        """
        if data_idx >= self.__num_dataset:
            raise ValueError("data_idx should be smaller than number of dataset")
        self.__get_eval_info()
        ret = []
        if self.__num_inner_eval > 0:
            result = np.zeros(self.__num_inner_eval, dtype=np.float32)
            tmp_out_len = ctypes.c_int64(0)
            _safe_call(_LIB.LGBM_BoosterGetEval(
                self.handle,
                data_idx,
                ctypes.byref(tmp_out_len),
                result.ctypes.data_as(ctypes.POINTER(ctypes.c_float))))
            if tmp_out_len.value != self.__num_inner_eval:
                raise ValueError("incorrect number of eval results")
            for i in range(self.__num_inner_eval):
                ret.append((data_name, self.__name_inner_eval[i], result[i], self.__higher_better_inner_eval[i]))
        if feval is not None:
            if data_idx == 0:
                cur_data = self.train_set
            else:
                cur_data = self.valid_sets[data_idx - 1]
            feval_ret = feval(self.__inner_predict(data_idx), cur_data)
            if isinstance(feval_ret, list):
                for eval_name, val, is_higher_better in feval_ret:
                    ret.append((data_name, eval_name, val, is_higher_better))
            else:
                eval_name, val, is_higher_better = feval_ret
                ret.append((data_name, eval_name, val, is_higher_better))
        return ret

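    # Sketch of the feval contract used above (hypothetical metric): it receives
    # (preds, dataset) and returns one tuple, or a list of tuples, of the form
    # (eval_name, value, is_higher_better); assumes the dataset exposes get_label():
    #
    #     import numpy as np
    #
    #     def rmse(preds, data):
    #         labels = data.get_label()
    #         return 'rmse', float(np.sqrt(np.mean((preds - labels) ** 2))), False
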
    def __inner_predict(self, data_idx):
        """
        Predict for training and validation dataset
        """
        if data_idx >= self.__num_dataset:
            raise ValueError("data_idx should be smaller than number of dataset")
        if self.__inner_predict_buffer[data_idx] is None:
            if data_idx == 0:
                n_preds = self.train_set.num_data() * self.__num_class
            else:
                n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
            self.__inner_predict_buffer[data_idx] = \
                np.zeros(n_preds, dtype=np.float32)
        """avoid to predict many time in one iteration"""
        if not self.__is_predicted_cur_iter[data_idx]:
            tmp_out_len = ctypes.c_int64(0)
            data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_float))
            _safe_call(_LIB.LGBM_BoosterGetPredict(
                self.handle,
                data_idx,
                ctypes.byref(tmp_out_len),
                data_ptr))
            if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
                raise ValueError("incorrect number of predict results for data %d" % (data_idx))
            self.__is_predicted_cur_iter[data_idx] = True
        return self.__inner_predict_buffer[data_idx]

    def __get_eval_info(self):
        """
        Get inner evaluation count and names
        """
        if self.__need_reload_eval_info:
            self.__need_reload_eval_info = False
            out_num_eval = ctypes.c_int64(0)
            """Get num of inner evals"""
            _safe_call(_LIB.LGBM_BoosterGetEvalCounts(
                self.handle,
                ctypes.byref(out_num_eval)))
            self.__num_inner_eval = out_num_eval.value
            if self.__num_inner_eval > 0:
                """Get name of evals"""
                tmp_out_len = ctypes.c_int64(0)
                string_buffers = [ctypes.create_string_buffer(255) for i in range(self.__num_inner_eval)]
                ptr_string_buffers = (ctypes.c_char_p*self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
                _safe_call(_LIB.LGBM_BoosterGetEvalNames(
                    self.handle,
                    ctypes.byref(tmp_out_len),
                    ptr_string_buffers))
                if self.__num_inner_eval != tmp_out_len.value:
                    raise ValueError("size of eval names doesn't equal with num_evals")
                self.__name_inner_eval = \
                    [string_buffers[i].value.decode() for i in range(self.__num_inner_eval)]
                self.__higher_better_inner_eval = \
                    [name.startswith(('auc', 'ndcg')) for name in self.__name_inner_eval]

    def attr(self, key):
        """Get attribute string from the Booster.

        Parameters
        ----------
        key : str
            The key to get attribute from.

        Returns
        -------
        value : str
            The attribute value of the key; returns None if the attribute does not exist.
        """
        return self.__attr.get(key, None)

    def set_attr(self, **kwargs):
        """Set the attribute of the Booster.

        Parameters
        ----------
        **kwargs
            The attributes to set. Setting a value to None deletes an attribute.
        """
        for key, value in kwargs.items():
            if value is not None:
                if not is_str(value):
                    raise ValueError("set_attr only accepts string values")
                self.__attr[key] = value
            else:
                self.__attr.pop(key, None)
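
    # Usage sketch:
    #
    #     bst.set_attr(best_iteration='27')   # values must be strings
    #     bst.attr('best_iteration')          # -> '27'
    #     bst.set_attr(best_iteration=None)   # deletes the attribute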