Unverified commit 2caf945f authored by Nikita Titov, committed by GitHub

[python] Remove `silent` argument (#4800)

* Update test_plotting.py

* Update dask.py

* Update sklearn.py

* Update test_sklearn.py

* Update basic.py

* Update engine.py

* Update test_engine.py

* Update basic.py

* Update basic.py

* Update engine.py
parent 65ee8ab2
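The migration is mechanical: wherever a `silent` argument was accepted, verbosity is now controlled through LightGBM's `verbose` parameter instead. A minimal before/after sketch (the toy data and variable names are illustrative, not part of this diff):

```python
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(100, 5), np.random.randint(2, size=100)

# Before this commit (already deprecated, emitting a warning):
#   train_set = lgb.Dataset(X, y, silent=True)
#   clf = lgb.LGBMClassifier(silent=True)

# After this commit, pass 'verbose' instead:
train_set = lgb.Dataset(X, y, params={'verbose': -1})
clf = lgb.LGBMClassifier(verbose=-1)  # extra kwargs flow into the booster params
```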
@@ -412,8 +412,6 @@ class _ConfigAliases:
                "two_round": {"two_round",
                              "two_round_loading",
                              "use_two_round_loading"},
-               "verbosity": {"verbosity",
-                             "verbose"},
                "weight_column": {"weight_column",
                                  "weight"}}
@@ -1173,7 +1171,7 @@ class Dataset:
     """Dataset in LightGBM."""
 
     def __init__(self, data, label=None, reference=None,
-                 weight=None, group=None, init_score=None, silent='warn',
+                 weight=None, group=None, init_score=None,
                  feature_name='auto', categorical_feature='auto', params=None,
                  free_raw_data=True):
         """Initialize Dataset.
@@ -1197,8 +1195,6 @@ class Dataset:
             where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
         init_score : list, list of lists (for multi-class task), numpy array, pandas Series, pandas DataFrame (for multi-class task), or None, optional (default=None)
             Init score for Dataset.
-        silent : bool, optional (default=False)
-            Whether to print messages during construction.
         feature_name : list of str, or 'auto', optional (default="auto")
             Feature names.
             If 'auto' and data is pandas DataFrame, data columns names are used.
@@ -1223,7 +1219,6 @@ class Dataset:
         self.weight = weight
         self.group = group
         self.init_score = init_score
-        self.silent = silent
         self.feature_name = feature_name
         self.categorical_feature = categorical_feature
         self.params = deepcopy(params)
@@ -1465,8 +1460,7 @@ class Dataset:
     def _lazy_init(self, data, label=None, reference=None,
                    weight=None, group=None, init_score=None, predictor=None,
-                   silent=False, feature_name='auto',
-                   categorical_feature='auto', params=None):
+                   feature_name='auto', categorical_feature='auto', params=None):
         if data is None:
             self.handle = None
             return self
@@ -1488,14 +1482,6 @@ class Dataset:
                 if key in args_names:
                     _log_warning(f'{key} keyword has been found in `params` and will be ignored.\n'
                                  f'Please use {key} argument of the Dataset constructor to pass this parameter.')
-        # user can set verbose with params, it has higher priority
-        if silent != "warn":
-            _log_warning("'silent' argument is deprecated and will be removed in a future release of LightGBM. "
-                         "Pass 'verbose' parameter via 'params' instead.")
-        else:
-            silent = False
-        if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
-            params["verbose"] = -1
         # get categorical features
         if categorical_feature is not None:
             categorical_indices = set()
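Note what the deleted block did: `silent` only injected `params["verbose"] = -1` when no verbosity alias was already present in `params`, so an explicit `verbose`/`verbosity` setting always won. After this change, `params` is the single source of truth for construction-time logging. A short sketch of the surviving pattern (toy `X`, `y` as above):

```python
quiet_ds = lgb.Dataset(X, y, params={'verbose': -1})  # suppress construction messages
loud_ds = lgb.Dataset(X, y, params={'verbosity': 1})  # 'verbosity' is an accepted alias
```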
@@ -1816,7 +1802,7 @@ class Dataset:
                 self._lazy_init(self.data, label=self.label, reference=self.reference,
                                 weight=self.weight, group=self.group,
                                 init_score=self.init_score, predictor=self._predictor,
-                                silent=self.silent, feature_name=self.feature_name, params=self.params)
+                                feature_name=self.feature_name, params=self.params)
             else:
                 # construct subset
                 used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
@@ -1847,14 +1833,12 @@ class Dataset:
             self._lazy_init(self.data, label=self.label,
                             weight=self.weight, group=self.group,
                             init_score=self.init_score, predictor=self._predictor,
-                            silent=self.silent, feature_name=self.feature_name,
-                            categorical_feature=self.categorical_feature, params=self.params)
+                            feature_name=self.feature_name, categorical_feature=self.categorical_feature, params=self.params)
         if self.free_raw_data:
             self.data = None
         return self
 
-    def create_valid(self, data, label=None, weight=None, group=None,
-                     init_score=None, silent='warn', params=None):
+    def create_valid(self, data, label=None, weight=None, group=None, init_score=None, params=None):
         """Create validation data align with current Dataset.
 
         Parameters
@@ -1874,8 +1858,6 @@ class Dataset:
             where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
         init_score : list, list of lists (for multi-class task), numpy array, pandas Series, pandas DataFrame (for multi-class task), or None, optional (default=None)
             Init score for Dataset.
-        silent : bool, optional (default=False)
-            Whether to print messages during construction.
         params : dict or None, optional (default=None)
             Other parameters for validation Dataset.
@@ -1886,7 +1868,7 @@ class Dataset:
         """
         ret = Dataset(data, label=label, reference=self,
                       weight=weight, group=group, init_score=init_score,
-                      silent=silent, params=params, free_raw_data=self.free_raw_data)
+                      params=params, free_raw_data=self.free_raw_data)
         ret._predictor = self._predictor
         ret.pandas_categorical = self.pandas_categorical
         return ret
@@ -2562,7 +2544,7 @@ class Dataset:
 class Booster:
     """Booster in LightGBM."""
 
-    def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent='warn'):
+    def __init__(self, params=None, train_set=None, model_file=None, model_str=None):
         """Initialize the Booster.
 
         Parameters
@@ -2575,8 +2557,6 @@ class Booster:
             Path to the model file.
         model_str : str or None, optional (default=None)
             Model will be loaded from this string.
-        silent : bool, optional (default=False)
-            Whether to print messages during construction.
         """
         self.handle = None
         self.network = False
@@ -2587,14 +2567,6 @@ class Booster:
         self.best_iteration = -1
         self.best_score = {}
         params = {} if params is None else deepcopy(params)
-        # user can set verbose with params, it has higher priority
-        if silent != 'warn':
-            _log_warning("'silent' argument is deprecated and will be removed in a future release of LightGBM. "
-                         "Pass 'verbose' parameter via 'params' instead.")
-        else:
-            silent = False
-        if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
-            params["verbose"] = -1
         if train_set is not None:
             # Training task
             if not isinstance(train_set, Dataset):
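`Booster.__init__` loses the identical fallback, so a directly constructed `Booster` now reads its verbosity from `params` alone. A minimal sketch, reusing the `train_set` from the earlier example:

```python
booster = lgb.Booster(params={'objective': 'binary', 'verbose': -1}, train_set=train_set)
```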
@@ -3388,12 +3360,6 @@ class Booster:
         _safe_call(_LIB.LGBM_BoosterGetNumClasses(
             self.handle,
             ctypes.byref(out_num_class)))
-        if verbose in {'warn', '_silent_false'}:
-            verbose = verbose == 'warn'
-        else:
-            _log_warning("'verbose' argument is deprecated and will be removed in a future release of LightGBM.")
-        if verbose:
-            _log_info(f'Finished loading model, total used {int(out_num_iterations.value)} iterations')
         self.__num_class = out_num_class.value
         self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
         return self
@@ -3608,7 +3574,7 @@ class Booster:
             default_value=None
         )
         new_params["linear_tree"] = bool(out_is_linear.value)
-        train_set = Dataset(data, label, silent=True, params=new_params)
+        train_set = Dataset(data, label, params=new_params)
         new_params['refit_decay_rate'] = decay_rate
         new_booster = Booster(new_params, train_set)
         # Copy models
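In `Booster.refit`, the internally constructed `Dataset` was hard-coded to `silent=True`; as the hunk above shows, it now takes its verbosity, like everything else, from `new_params`. Calling code is unaffected (a sketch, assuming a trained `booster` and arrays `X`, `y`):

```python
refitted = booster.refit(X, y, decay_rate=0.9)  # leaf values re-estimated on new data
```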
@@ -1108,7 +1108,6 @@ class DaskLGBMClassifier(LGBMClassifier, _DaskLGBMModel):
         reg_lambda: float = 0.,
         random_state: Optional[Union[int, np.random.RandomState]] = None,
         n_jobs: int = -1,
-        silent: bool = "warn",
         importance_type: str = 'split',
         client: Optional[Client] = None,
         **kwargs: Any
@@ -1134,7 +1133,6 @@ class DaskLGBMClassifier(LGBMClassifier, _DaskLGBMModel):
             reg_lambda=reg_lambda,
             random_state=random_state,
             n_jobs=n_jobs,
-            silent=silent,
             importance_type=importance_type,
             **kwargs
         )
@@ -1293,7 +1291,6 @@ class DaskLGBMRegressor(LGBMRegressor, _DaskLGBMModel):
         reg_lambda: float = 0.,
         random_state: Optional[Union[int, np.random.RandomState]] = None,
         n_jobs: int = -1,
-        silent: bool = "warn",
         importance_type: str = 'split',
         client: Optional[Client] = None,
         **kwargs: Any
@@ -1319,7 +1316,6 @@ class DaskLGBMRegressor(LGBMRegressor, _DaskLGBMModel):
             reg_lambda=reg_lambda,
             random_state=random_state,
             n_jobs=n_jobs,
-            silent=silent,
             importance_type=importance_type,
             **kwargs
         )
@@ -1458,7 +1454,6 @@ class DaskLGBMRanker(LGBMRanker, _DaskLGBMModel):
         reg_lambda: float = 0.,
         random_state: Optional[Union[int, np.random.RandomState]] = None,
         n_jobs: int = -1,
-        silent: bool = "warn",
         importance_type: str = 'split',
         client: Optional[Client] = None,
         **kwargs: Any
@@ -1484,7 +1479,6 @@ class DaskLGBMRanker(LGBMRanker, _DaskLGBMModel):
             reg_lambda=reg_lambda,
             random_state=random_state,
             n_jobs=n_jobs,
-            silent=silent,
             importance_type=importance_type,
             **kwargs
         )
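The Dask wrappers change in lockstep because they only forward constructor arguments to the scikit-learn base classes. A sketch of post-change usage (assumes a local Dask cluster; the toy arrays are illustrative):

```python
from distributed import Client, LocalCluster
import dask.array as da
import lightgbm as lgb

cluster = LocalCluster(n_workers=2)
client = Client(cluster)

dX = da.random.random((1000, 10), chunks=(100, 10))
dy = da.random.randint(0, 2, size=1000, chunks=100)

clf = lgb.DaskLGBMClassifier(n_estimators=10, verbose=-1)  # kwargs forwarded, same as sklearn
clf.fit(dX, dy)
```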
@@ -398,7 +398,6 @@ class LGBMModel(_LGBMModelBase):
         reg_lambda: float = 0.,
         random_state: Optional[Union[int, np.random.RandomState]] = None,
         n_jobs: int = -1,
-        silent: Union[bool, str] = 'warn',
         importance_type: str = 'split',
         **kwargs
     ):
@@ -463,8 +462,6 @@ class LGBMModel(_LGBMModelBase):
             If None, default seeds in C++ code are used.
         n_jobs : int, optional (default=-1)
             Number of parallel threads to use for training (can be changed at prediction time).
-        silent : bool, optional (default=True)
-            Whether to print messages while running boosting.
         importance_type : str, optional (default='split')
             The type of feature importance to be filled into ``feature_importances_``.
             If 'split', result contains numbers of times the feature is used in a model.
@@ -528,7 +525,6 @@ class LGBMModel(_LGBMModelBase):
         self.reg_lambda = reg_lambda
         self.random_state = random_state
         self.n_jobs = n_jobs
-        self.silent = silent
         self.importance_type = importance_type
         self._Booster = None
         self._evals_result = None
@@ -632,17 +628,6 @@ class LGBMModel(_LGBMModelBase):
             self._fobj = None
         params['objective'] = self._objective
 
-        # user can set verbose with kwargs, it has higher priority
-        if self.silent != "warn":
-            _log_warning("'silent' argument is deprecated and will be removed in a future release of LightGBM. "
-                         "Pass 'verbose' parameter via keyword arguments instead.")
-            silent = self.silent
-        else:
-            silent = True
-        if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
-            params['verbose'] = -1
-        params.pop('silent', None)
         params.pop('importance_type', None)
         params.pop('n_estimators', None)
         params.pop('class_weight', None)
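On the scikit-learn side, `silent` disappears from the estimator signature and therefore from `get_params()`, which is why the `params.pop('silent', None)` line can go as well. The replacement path is the ordinary kwargs passthrough (a sketch; any unknown keyword is forwarded as a LightGBM parameter):

```python
reg = lgb.LGBMRegressor(n_estimators=50, verbose=-1)  # 'verbose' ends up in the params dict
reg.fit(X, y)
```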
@@ -1686,8 +1686,8 @@ def test_fpreproc():
 def test_metrics():
     X, y = load_digits(n_class=2, return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
-    lgb_train = lgb.Dataset(X_train, y_train, silent=True)
-    lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train, silent=True)
+    lgb_train = lgb.Dataset(X_train, y_train)
+    lgb_valid = lgb.Dataset(X_test, y_test, reference=lgb_train)
     evals_result = {}
     params_verbose = {'verbose': -1}
@@ -1991,7 +1991,7 @@ def test_metrics():
     assert 'error' in evals_result['valid_0']
 
     X, y = load_digits(n_class=3, return_X_y=True)
-    lgb_train = lgb.Dataset(X, y, silent=True)
+    lgb_train = lgb.Dataset(X, y)
     obj_multi_aliases = ['multiclass', 'softmax', 'multiclassova', 'multiclass_ova', 'ova', 'ovr']
     for obj_multi_alias in obj_multi_aliases:
@@ -2065,8 +2065,8 @@ def test_multiple_feval_train():
     X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.2)
 
-    train_dataset = lgb.Dataset(data=X_train, label=y_train, silent=True)
-    validation_dataset = lgb.Dataset(data=X_validation, label=y_validation, reference=train_dataset, silent=True)
+    train_dataset = lgb.Dataset(data=X_train, label=y_train)
+    validation_dataset = lgb.Dataset(data=X_validation, label=y_validation, reference=train_dataset)
     evals_result = {}
     lgb.train(
         params=params,
@@ -2087,7 +2087,7 @@ def test_multiple_feval_cv():
     params = {'verbose': -1, 'objective': 'binary', 'metric': 'binary_logloss'}
 
-    train_dataset = lgb.Dataset(data=X, label=y, silent=True)
+    train_dataset = lgb.Dataset(data=X, label=y)
     cv_results = lgb.cv(
         params=params,
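The test updates above show the same idea for the functional API: quietness comes from the params dict given to `lgb.train` or `lgb.cv`, not from the `Dataset`. For instance (illustrative, mirroring the test setup):

```python
params = {'objective': 'binary', 'metric': 'binary_logloss', 'verbose': -1}
booster = lgb.train(params, lgb.Dataset(X, y), num_boost_round=10)
```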
@@ -45,7 +45,7 @@ def test_plot_importance(params, breast_cancer_split, train_data):
     assert ax0.get_ylabel() == 'Features'
     assert len(ax0.patches) <= 30
 
-    gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
+    gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
     gbm1.fit(X_train, y_train)
 
     ax1 = lgb.plot_importance(gbm1, color='r', title='t', xlabel='x', ylabel='y')
@@ -75,7 +75,7 @@ def test_plot_importance(params, breast_cancer_split, train_data):
     assert ax3.get_ylabel() == 'y @importance_type@'
     assert len(ax3.patches) <= 30
 
-    gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, importance_type="gain")
+    gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1, importance_type="gain")
     gbm2.fit(X_train, y_train)
 
     def get_bounds_of_first_patch(axes):
@@ -107,7 +107,7 @@ def test_plot_split_value_histogram(params, breast_cancer_split, train_data):
     assert ax0.get_ylabel() == 'Count'
     assert len(ax0.patches) <= 2
 
-    gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
+    gbm1 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
     gbm1.fit(X_train, y_train)
 
     ax1 = lgb.plot_split_value_histogram(gbm1, gbm1.booster_.feature_name()[27], figsize=(10, 5),
@@ -142,7 +142,7 @@ def test_plot_split_value_histogram(params, breast_cancer_split, train_data):
                     reason='matplotlib or graphviz is not installed')
 def test_plot_tree(breast_cancer_split):
     X_train, _, y_train, _ = breast_cancer_split
-    gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
+    gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
     gbm.fit(X_train, y_train, verbose=False)
 
     with pytest.raises(IndexError):
@@ -160,7 +160,7 @@ def test_create_tree_digraph(breast_cancer_split):
     X_train, _, y_train, _ = breast_cancer_split
     constraints = [-1, 1] * int(X_train.shape[1] / 2)
-    gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True, monotone_constraints=constraints)
+    gbm = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1, monotone_constraints=constraints)
     gbm.fit(X_train, y_train, verbose=False)
 
     with pytest.raises(IndexError):
@@ -264,7 +264,7 @@ def test_plot_metrics(params, breast_cancer_split, train_data):
     with pytest.raises(ValueError, match="eval results cannot be empty."):
         lgb.plot_metric(evals_result1)
 
-    gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, silent=True)
+    gbm2 = lgb.LGBMClassifier(n_estimators=10, num_leaves=3, verbose=-1)
     gbm2.fit(X_train, y_train, eval_set=[(X_test, y_test)], verbose=False)
 
     ax4 = lgb.plot_metric(gbm2, title=None, xlabel=None, ylabel=None)
     assert isinstance(ax4, matplotlib.axes.Axes)
@@ -91,7 +91,7 @@ def multi_logloss(y_true, y_pred):
 def test_binary():
     X, y = load_breast_cancer(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
-    gbm = lgb.LGBMClassifier(n_estimators=50, silent=True)
+    gbm = lgb.LGBMClassifier(n_estimators=50, verbose=-1)
     gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
     ret = log_loss(y_test, gbm.predict_proba(X_test))
     assert ret < 0.12
@@ -101,7 +101,7 @@ def test_binary():
 def test_regression():
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
-    gbm = lgb.LGBMRegressor(n_estimators=50, silent=True)
+    gbm = lgb.LGBMRegressor(n_estimators=50, verbose=-1)
     gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
     ret = mean_squared_error(y_test, gbm.predict(X_test))
     assert ret < 7
@@ -111,7 +111,7 @@ def test_regression():
 def test_multiclass():
     X, y = load_digits(n_class=10, return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
-    gbm = lgb.LGBMClassifier(n_estimators=50, silent=True)
+    gbm = lgb.LGBMClassifier(n_estimators=50, verbose=-1)
     gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
     ret = multi_error(y_test, gbm.predict(X_test))
     assert ret < 0.05
@@ -195,7 +195,7 @@ def test_objective_aliases(custom_objective):
 def test_regression_with_custom_objective():
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
-    gbm = lgb.LGBMRegressor(n_estimators=50, silent=True, objective=objective_ls)
+    gbm = lgb.LGBMRegressor(n_estimators=50, verbose=-1, objective=objective_ls)
     gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
     ret = mean_squared_error(y_test, gbm.predict(X_test))
     assert ret < 7.0
@@ -205,7 +205,7 @@ def test_regression_with_custom_objective():
 def test_binary_classification_with_custom_objective():
     X, y = load_digits(n_class=2, return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
-    gbm = lgb.LGBMClassifier(n_estimators=50, silent=True, objective=logregobj)
+    gbm = lgb.LGBMClassifier(n_estimators=50, verbose=-1, objective=logregobj)
     gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=5, verbose=False)
     # prediction result is actually not transformed (is raw) due to custom objective
     y_pred_raw = gbm.predict_proba(X_test)
@@ -421,7 +421,7 @@ def test_regressor_chain():
 def test_clone_and_property():
     X, y = load_boston(return_X_y=True)
-    gbm = lgb.LGBMRegressor(n_estimators=10, silent=True)
+    gbm = lgb.LGBMRegressor(n_estimators=10, verbose=-1)
     gbm.fit(X, y, verbose=False)
 
     gbm_clone = clone(gbm)
@@ -429,7 +429,7 @@ def test_clone_and_property():
     assert isinstance(gbm.feature_importances_, np.ndarray)
 
     X, y = load_digits(n_class=2, return_X_y=True)
-    clf = lgb.LGBMClassifier(n_estimators=10, silent=True)
+    clf = lgb.LGBMClassifier(n_estimators=10, verbose=-1)
     clf.fit(X, y, verbose=False)
     assert sorted(clf.classes_) == [0, 1]
     assert clf.n_classes_ == 2
@@ -441,7 +441,7 @@ def test_joblib():
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
     gbm = lgb.LGBMRegressor(n_estimators=10, objective=custom_asymmetric_obj,
-                            silent=True, importance_type='split')
+                            verbose=-1, importance_type='split')
     gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)],
             eval_metric=mse, early_stopping_rounds=5, verbose=False,
             callbacks=[lgb.reset_parameter(learning_rate=list(np.arange(1, 0, -0.1)))])
@@ -694,7 +694,7 @@ def test_predict():
 def test_evaluate_train_set():
     X, y = load_boston(return_X_y=True)
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
-    gbm = lgb.LGBMRegressor(n_estimators=10, silent=True)
+    gbm = lgb.LGBMRegressor(n_estimators=10, verbose=-1)
     gbm.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=False)
     assert len(gbm.evals_result_) == 2
     assert 'training' in gbm.evals_result_
@@ -1142,7 +1142,7 @@ def test_class_weight():
     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
     y_train_str = y_train.astype('str')
     y_test_str = y_test.astype('str')
-    gbm = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', silent=True)
+    gbm = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', verbose=-1)
     gbm.fit(X_train, y_train,
             eval_set=[(X_train, y_train), (X_test, y_test), (X_test, y_test),
                       (X_test, y_test), (X_test, y_test)],
@@ -1154,7 +1154,7 @@ def test_class_weight():
                     np.testing.assert_allclose,
                     gbm.evals_result_[eval_set1][metric],
                     gbm.evals_result_[eval_set2][metric])
-    gbm_str = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', silent=True)
+    gbm_str = lgb.LGBMClassifier(n_estimators=10, class_weight='balanced', verbose=-1)
     gbm_str.fit(X_train, y_train_str,
                 eval_set=[(X_train, y_train_str), (X_test, y_test_str),
                           (X_test, y_test_str), (X_test, y_test_str), (X_test, y_test_str)],