Commit 2459362a authored by Nikita Titov's avatar Nikita Titov Committed by Guolin Ke
Browse files

[python] fixed picklability of sklearn models with custom obj and updated...

[python] fixed picklability of sklearn models with custom obj and updated docstrings for custom obj (#2191)

* refactored joblib test

* fixed picklability of sklearn models with custom obj and updated docstrings for custom obj

* pickled model should be able to predict without refitting
parent e5b6e50e
...@@ -59,7 +59,7 @@ def is_numeric(obj): ...@@ -59,7 +59,7 @@ def is_numeric(obj):
def is_numpy_1d_array(data): def is_numpy_1d_array(data):
"""Check whether data is a 1-D numpy array.""" """Check whether data is a numpy 1-D array."""
return isinstance(data, np.ndarray) and len(data.shape) == 1 return isinstance(data, np.ndarray) and len(data.shape) == 1
...@@ -69,7 +69,7 @@ def is_1d_list(data): ...@@ -69,7 +69,7 @@ def is_1d_list(data):
def list_to_1d_numpy(data, dtype=np.float32, name='list'): def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""Convert data to 1-D numpy array.""" """Convert data to numpy 1-D array."""
if is_numpy_1d_array(data): if is_numpy_1d_array(data):
if data.dtype == dtype: if data.dtype == dtype:
return data return data
...@@ -1853,9 +1853,20 @@ class Booster(object): ...@@ -1853,9 +1853,20 @@ class Booster(object):
If None, last training data is used. If None, last training data is used.
fobj : callable or None, optional (default=None) fobj : callable or None, optional (default=None)
Customized objective function. Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
For multi-class task, the score is group by class_id first, then group by row_id. preds : list or numpy 1-D array
If you want to get i-th row score in j-th class, the access way is score[j * num_data + i] The predicted values.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) for each sample point.
For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well. and you should group grad and hess in this way as well.
Returns Returns
...@@ -1902,9 +1913,9 @@ class Booster(object): ...@@ -1902,9 +1913,9 @@ class Booster(object):
Parameters Parameters
---------- ----------
grad : 1-D numpy array or 1-D list grad : list or numpy 1-D array
The first order derivative (gradient). The first order derivative (gradient).
hess : 1-D numpy array or 1-D list hess : list or numpy 1-D array
The second order derivative (Hessian). The second order derivative (Hessian).
Returns Returns
...@@ -1994,8 +2005,20 @@ class Booster(object): ...@@ -1994,8 +2005,20 @@ class Booster(object):
Name of the data. Name of the data.
feval : callable or None, optional (default=None) feval : callable or None, optional (default=None)
Customized evaluation function. Customized evaluation function.
Should accept two parameters: preds, train_data, Should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples. and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
eval_data : Dataset
The evaluation dataset.
eval_name : string
The name of evaluation function.
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds is group by class_id first, then group by row_id. For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
...@@ -2030,6 +2053,18 @@ class Booster(object): ...@@ -2030,6 +2053,18 @@ class Booster(object):
Customized evaluation function. Customized evaluation function.
Should accept two parameters: preds, train_data, Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples. and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function.
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds is group by class_id first, then group by row_id. For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
...@@ -2047,8 +2082,20 @@ class Booster(object): ...@@ -2047,8 +2082,20 @@ class Booster(object):
---------- ----------
feval : callable or None, optional (default=None) feval : callable or None, optional (default=None)
Customized evaluation function. Customized evaluation function.
Should accept two parameters: preds, train_data, Should accept two parameters: preds, valid_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples. and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
valid_data : Dataset
The validation dataset.
eval_name : string
The name of evaluation function.
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds is group by class_id first, then group by row_id. For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
......
...@@ -39,10 +39,38 @@ def train(params, train_set, num_boost_round=100, ...@@ -39,10 +39,38 @@ def train(params, train_set, num_boost_round=100,
Names of ``valid_sets``. Names of ``valid_sets``.
fobj : callable or None, optional (default=None) fobj : callable or None, optional (default=None)
Customized objective function. Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) for each sample point.
For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
feval : callable or None, optional (default=None) feval : callable or None, optional (default=None)
Customized evaluation function. Customized evaluation function.
Should accept two parameters: preds, train_data, Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples. and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function.
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds is group by class_id first, then group by row_id. For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
To ignore the default metric corresponding to the used objective, To ignore the default metric corresponding to the used objective,
...@@ -373,11 +401,39 @@ def cv(params, train_set, num_boost_round=100, ...@@ -373,11 +401,39 @@ def cv(params, train_set, num_boost_round=100,
Evaluation metrics to be monitored while CV. Evaluation metrics to be monitored while CV.
If not None, the metric in ``params`` will be overridden. If not None, the metric in ``params`` will be overridden.
fobj : callable or None, optional (default=None) fobj : callable or None, optional (default=None)
Custom objective function. Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) for each sample point.
For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
feval : callable or None, optional (default=None) feval : callable or None, optional (default=None)
Customized evaluation function. Customized evaluation function.
Should accept two parameters: preds, train_data, Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples. and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function.
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds is group by class_id first, then group by row_id. For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
To ignore the default metric corresponding to the used objective, To ignore the default metric corresponding to the used objective,
......
...@@ -15,19 +15,20 @@ from .compat import (SKLEARN_INSTALLED, _LGBMClassifierBase, ...@@ -15,19 +15,20 @@ from .compat import (SKLEARN_INSTALLED, _LGBMClassifierBase,
from .engine import train from .engine import train
def _objective_function_wrapper(func): class _ObjectiveFunctionWrapper(object):
"""Decorate an objective function. """Proxy class for objective function."""
Note def __init__(self, func):
---- """Construct a proxy class.
For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i] This class transforms objective function to match objective function with signature ``new_func(preds, dataset)``
and you should group grad and hess in this way as well. as expected by ``lightgbm.engine.train``.
Parameters Parameters
---------- ----------
func : callable func : callable
Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group): Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group)
and returns (grad, hess):
y_true : array-like of shape = [n_samples] y_true : array-like of shape = [n_samples]
The target values. The target values.
...@@ -35,26 +36,42 @@ def _objective_function_wrapper(func): ...@@ -35,26 +36,42 @@ def _objective_function_wrapper(func):
The predicted values. The predicted values.
group : array-like group : array-like
Group/query data, used for ranking task. Group/query data, used for ranking task.
grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the first order derivative (gradient) for each sample point.
hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the second order derivative (Hessian) for each sample point.
Returns Note
------- ----
new_func : callable For multi-class task, the y_pred is group by class_id first, then group by row_id.
The new objective function as expected by ``lightgbm.engine.train``. If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]
The signature is ``new_func(preds, dataset)``: and you should group grad and hess in this way as well.
"""
self.func = func
def __call__(self, preds, dataset):
"""Call passed function with appropriate arguments.
Parameters
----------
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values. The predicted values.
dataset : Dataset dataset : Dataset
The training set from which the labels will be extracted using ``dataset.get_label()``. The training dataset.
Returns
-------
grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the first order derivative (gradient) for each sample point.
hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the second order derivative (Hessian) for each sample point.
""" """
def inner(preds, dataset):
"""Call passed function with appropriate arguments."""
labels = dataset.get_label() labels = dataset.get_label()
argc = argc_(func) argc = argc_(self.func)
if argc == 2: if argc == 2:
grad, hess = func(labels, preds) grad, hess = self.func(labels, preds)
elif argc == 3: elif argc == 3:
grad, hess = func(labels, preds, dataset.get_group()) grad, hess = self.func(labels, preds, dataset.get_group())
else: else:
raise TypeError("Self-defined objective function should have 2 or 3 arguments, got %d" % argc) raise TypeError("Self-defined objective function should have 2 or 3 arguments, got %d" % argc)
"""weighted for objective""" """weighted for objective"""
...@@ -75,16 +92,16 @@ def _objective_function_wrapper(func): ...@@ -75,16 +92,16 @@ def _objective_function_wrapper(func):
grad[idx] *= weight[i] grad[idx] *= weight[i]
hess[idx] *= weight[i] hess[idx] *= weight[i]
return grad, hess return grad, hess
return inner
def _eval_function_wrapper(func): class _EvalFunctionWrapper(object):
"""Decorate an eval function. """Proxy class for evaluation function."""
Note def __init__(self, func):
---- """Construct a proxy class.
For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]. This class transforms evaluation function to match evaluation function with signature ``new_func(preds, dataset)``
as expected by ``lightgbm.engine.train``.
Parameters Parameters
---------- ----------
...@@ -93,7 +110,8 @@ def _eval_function_wrapper(func): ...@@ -93,7 +110,8 @@ def _eval_function_wrapper(func):
``func(y_true, y_pred)``, ``func(y_true, y_pred)``,
``func(y_true, y_pred, weight)`` ``func(y_true, y_pred, weight)``
or ``func(y_true, y_pred, weight, group)`` or ``func(y_true, y_pred, weight, group)``
and returns (eval_name->string, eval_result->float, is_bigger_better->bool): and returns (eval_name, eval_result, is_higher_better) or
list of (eval_name, eval_result, is_higher_better):
y_true : array-like of shape = [n_samples] y_true : array-like of shape = [n_samples]
The target values. The target values.
...@@ -103,31 +121,49 @@ def _eval_function_wrapper(func): ...@@ -103,31 +121,49 @@ def _eval_function_wrapper(func):
The weight of samples. The weight of samples.
group : array-like group : array-like
Group/query data, used for ranking task. Group/query data, used for ranking task.
eval_name : string
The name of evaluation function.
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
Returns Note
------- ----
new_func : callable For multi-class task, the y_pred is group by class_id first, then group by row_id.
The new eval function as expected by ``lightgbm.engine.train``. If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].
The signature is ``new_func(preds, dataset)``: """
self.func = func
def __call__(self, preds, dataset):
"""Call passed function with appropriate arguments.
Parameters
----------
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values. The predicted values.
dataset : Dataset dataset : Dataset
The training set from which the labels will be extracted using ``dataset.get_label()``. The training dataset.
Returns
-------
eval_name : string
The name of evaluation function.
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
""" """
def inner(preds, dataset):
"""Call passed function with appropriate arguments."""
labels = dataset.get_label() labels = dataset.get_label()
argc = argc_(func) argc = argc_(self.func)
if argc == 2: if argc == 2:
return func(labels, preds) return self.func(labels, preds)
elif argc == 3: elif argc == 3:
return func(labels, preds, dataset.get_weight()) return self.func(labels, preds, dataset.get_weight())
elif argc == 4: elif argc == 4:
return func(labels, preds, dataset.get_weight(), dataset.get_group()) return self.func(labels, preds, dataset.get_weight(), dataset.get_group())
else: else:
raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % argc) raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % argc)
return inner
class LGBMModel(_LGBMModelBase): class LGBMModel(_LGBMModelBase):
...@@ -248,9 +284,9 @@ class LGBMModel(_LGBMModelBase): ...@@ -248,9 +284,9 @@ class LGBMModel(_LGBMModelBase):
group : array-like group : array-like
Group/query data, used for ranking task. Group/query data, used for ranking task.
grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the gradient for each sample point. The value of the first order derivative (gradient) for each sample point.
hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task) hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the second derivative for each sample point. The value of the second order derivative (Hessian) for each sample point.
For multi-class task, the y_pred is group by class_id first, then group by row_id. For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i] If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]
...@@ -414,8 +450,8 @@ class LGBMModel(_LGBMModelBase): ...@@ -414,8 +450,8 @@ class LGBMModel(_LGBMModelBase):
Custom eval function expects a callable with following signatures: Custom eval function expects a callable with following signatures:
``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or ``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
``func(y_true, y_pred, weight, group)`` ``func(y_true, y_pred, weight, group)``
and returns (eval_name, eval_result, is_bigger_better) or and returns (eval_name, eval_result, is_higher_better) or
list of (eval_name, eval_result, is_bigger_better): list of (eval_name, eval_result, is_higher_better):
y_true : array-like of shape = [n_samples] y_true : array-like of shape = [n_samples]
The target values. The target values.
...@@ -426,11 +462,11 @@ class LGBMModel(_LGBMModelBase): ...@@ -426,11 +462,11 @@ class LGBMModel(_LGBMModelBase):
group : array-like group : array-like
Group/query data, used for ranking task. Group/query data, used for ranking task.
eval_name : string eval_name : string
The name of evaluation. The name of evaluation function.
eval_result : float eval_result : float
The eval result. The eval result.
is_bigger_better : bool is_higher_better : bool
Is eval result bigger better, e.g. AUC is bigger_better. Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the y_pred is group by class_id first, then group by row_id. For multi-class task, the y_pred is group by class_id first, then group by row_id.
If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i]. If you want to get i-th row y_pred in j-th class, the access way is y_pred[j * num_data + i].
...@@ -445,7 +481,7 @@ class LGBMModel(_LGBMModelBase): ...@@ -445,7 +481,7 @@ class LGBMModel(_LGBMModelBase):
else: else:
raise ValueError("Unknown LGBMModel type.") raise ValueError("Unknown LGBMModel type.")
if callable(self._objective): if callable(self._objective):
self._fobj = _objective_function_wrapper(self._objective) self._fobj = _ObjectiveFunctionWrapper(self._objective)
else: else:
self._fobj = None self._fobj = None
evals_result = {} evals_result = {}
...@@ -466,7 +502,7 @@ class LGBMModel(_LGBMModelBase): ...@@ -466,7 +502,7 @@ class LGBMModel(_LGBMModelBase):
params['objective'] = 'None' # objective = nullptr for unknown objective params['objective'] = 'None' # objective = nullptr for unknown objective
if callable(eval_metric): if callable(eval_metric):
feval = _eval_function_wrapper(eval_metric) feval = _EvalFunctionWrapper(eval_metric)
else: else:
feval = None feval = None
# register default metric for consistency with callable eval_metric case # register default metric for consistency with callable eval_metric case
......
...@@ -26,6 +26,17 @@ def multi_logloss(y_true, y_pred): ...@@ -26,6 +26,17 @@ def multi_logloss(y_true, y_pred):
return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)]) return np.mean([-math.log(y_pred[i][y]) for i, y in enumerate(y_true)])
def custom_asymmetric_obj(y_true, y_pred):
residual = (y_true - y_pred).astype("float")
grad = np.where(residual < 0, -2 * 10.0 * residual, -2 * residual)
hess = np.where(residual < 0, 2 * 10.0, 2.0)
return grad, hess
def mse(y_true, y_pred):
return 'custom MSE', mean_squared_error(y_true, y_pred), False
class TestSklearn(unittest.TestCase): class TestSklearn(unittest.TestCase):
def test_binary(self): def test_binary(self):
...@@ -143,27 +154,27 @@ class TestSklearn(unittest.TestCase): ...@@ -143,27 +154,27 @@ class TestSklearn(unittest.TestCase):
def test_joblib(self): def test_joblib(self):
X, y = load_boston(True) X, y = load_boston(True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm = lgb.LGBMRegressor(n_estimators=100, silent=True) gbm = lgb.LGBMRegressor(n_estimators=10, objective=custom_asymmetric_obj,
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=10, verbose=False) silent=True, importance_type='split')
gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)],
eval_metric=mse, early_stopping_rounds=5, verbose=False,
callbacks=[lgb.reset_parameter(learning_rate=list(np.arange(1, 0, -0.1)))])
joblib.dump(gbm, 'lgb.pkl') joblib.dump(gbm, 'lgb.pkl') # test model with custom functions
gbm_pickle = joblib.load('lgb.pkl') gbm_pickle = joblib.load('lgb.pkl')
self.assertIsInstance(gbm_pickle.booster_, lgb.Booster) self.assertIsInstance(gbm_pickle.booster_, lgb.Booster)
self.assertDictEqual(gbm.get_params(), gbm_pickle.get_params()) self.assertDictEqual(gbm.get_params(), gbm_pickle.get_params())
self.assertListEqual(list(gbm.feature_importances_), list(gbm_pickle.feature_importances_)) np.testing.assert_array_equal(gbm.feature_importances_, gbm_pickle.feature_importances_)
self.assertAlmostEqual(gbm_pickle.learning_rate, 0.1)
X, y = load_boston(True) self.assertTrue(callable(gbm_pickle.objective))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False) for eval_set in gbm.evals_result_:
gbm_pickle.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False) for metric in gbm.evals_result_[eval_set]:
for key in gbm.evals_result_: np.testing.assert_array_almost_equal(gbm.evals_result_[eval_set][metric],
for evals in zip(gbm.evals_result_[key], gbm_pickle.evals_result_[key]): gbm_pickle.evals_result_[eval_set][metric])
self.assertAlmostEqual(*evals, places=5)
pred_origin = gbm.predict(X_test) pred_origin = gbm.predict(X_test)
pred_pickle = gbm_pickle.predict(X_test) pred_pickle = gbm_pickle.predict(X_test)
self.assertEqual(len(pred_origin), len(pred_pickle)) np.testing.assert_array_almost_equal(pred_origin, pred_pickle)
for preds in zip(pred_origin, pred_pickle):
self.assertAlmostEqual(*preds, places=5)
def test_feature_importances_single_leaf(self): def test_feature_importances_single_leaf(self):
clf = lgb.LGBMClassifier(n_estimators=100) clf = lgb.LGBMClassifier(n_estimators=100)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment