"include/git@developer.sourcefind.cn:tianlh/lightgbm-dcu.git" did not exist on "7b6f80f350f41d522535d5f62ef9ad7a0cbb0f85"
Unverified Commit 7b8b5151, authored by Germán Ramírez-Espinoza, committed by GitHub
Browse files

[python][scikit-learn] Fixes a bug that prevented using multiple eval_metrics...


[python][scikit-learn] Fixes a bug that prevented using multiple eval_metrics in LGBMClassifier (#3222)

* Fixes a bug that prevented using multiple eval_metrics in LGBMClassifier

* Move bug-fix test to the test_metrics unit-test

* Fix test to avoid issues with existing tests

* Fix coding-style error
Co-authored-by: German I Ramirez-Espinoza <gire@home>
parent 61b3c308
@@ -782,20 +782,28 @@ class LGBMClassifier(LGBMModel, _LGBMClassifierBase):
         self._classes = self._le.classes_
         self._n_classes = len(self._classes)
         if self._n_classes > 2:
             # Switch to using a multiclass objective in the underlying LGBM instance
             ova_aliases = {"multiclassova", "multiclass_ova", "ova", "ovr"}
             if self._objective not in ova_aliases and not callable(self._objective):
                 self._objective = "multiclass"
-            if eval_metric in {'logloss', 'binary_logloss'}:
-                eval_metric = "multi_logloss"
-            elif eval_metric in {'error', 'binary_error'}:
-                eval_metric = "multi_error"
-        else:
-            if eval_metric in {'logloss', 'multi_logloss'}:
-                eval_metric = 'binary_logloss'
-            elif eval_metric in {'error', 'multi_error'}:
-                eval_metric = 'binary_error'
+
+        if not callable(eval_metric):
+            if isinstance(eval_metric, (string_type, type(None))):
+                eval_metric = [eval_metric]
+            if self._n_classes > 2:
+                for index, metric in enumerate(eval_metric):
+                    if metric in {'logloss', 'binary_logloss'}:
+                        eval_metric[index] = "multi_logloss"
+                    elif metric in {'error', 'binary_error'}:
+                        eval_metric[index] = "multi_error"
+            else:
+                for index, metric in enumerate(eval_metric):
+                    if metric in {'logloss', 'multi_logloss'}:
+                        eval_metric[index] = 'binary_logloss'
+                    elif metric in {'error', 'multi_error'}:
+                        eval_metric[index] = 'binary_error'

         # do not modify args, as it causes errors in model selection tools
         valid_sets = None
...
@@ -540,6 +540,20 @@ class TestSklearn(unittest.TestCase):
         self.assertIn('l2', gbm.evals_result_['training'])
         self.assertIn('mape', gbm.evals_result_['training'])
+        # non-default metric with multiple metrics in eval_metric for LGBMClassifier
+        X_classification, y_classification = load_breast_cancer(True)
+        params_classification = {'n_estimators': 2, 'verbose': -1,
+                                 'objective': 'binary', 'metric': 'binary_logloss'}
+        params_fit_classification = {'X': X_classification, 'y': y_classification,
+                                     'eval_set': (X_classification, y_classification),
+                                     'verbose': False}
+        gbm = lgb.LGBMClassifier(**params_classification).fit(eval_metric=['fair', 'error'],
+                                                              **params_fit_classification)
+        self.assertEqual(len(gbm.evals_result_['training']), 3)
+        self.assertIn('fair', gbm.evals_result_['training'])
+        self.assertIn('binary_error', gbm.evals_result_['training'])
+        self.assertIn('binary_logloss', gbm.evals_result_['training'])
         # default metric for non-default objective
         gbm = lgb.LGBMRegressor(objective='regression_l1', **params).fit(**params_fit)
         self.assertEqual(len(gbm.evals_result_['training']), 1)
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment