Unverified Commit 7eac5a63 authored by Nikita Titov's avatar Nikita Titov Committed by GitHub
Browse files

[python] minor refactoring of Python code (#4442)

* Update test_sklearn.py

* Update test_basic.py

* Update dask.py

* Update basic.py

* Update basic.py

* Update basic.py

* Update basic.py

* Update callback.py
parent 1525cc42
...@@ -649,7 +649,7 @@ class Sequence(abc.ABC): ...@@ -649,7 +649,7 @@ class Sequence(abc.ABC):
elif isinstance(idx, slice): elif isinstance(idx, slice):
return np.stack(self.__get_one_line__(i) for i in range(idx.start, idx.stop)) return np.stack(self.__get_one_line__(i) for i in range(idx.start, idx.stop))
else: else:
raise TypeError(f"Sequence index must be integer or slice, got {type(idx)}") raise TypeError(f"Sequence index must be integer or slice, got {type(idx).__name__}")
Parameters Parameters
---------- ----------
...@@ -1436,7 +1436,7 @@ class Dataset: ...@@ -1436,7 +1436,7 @@ class Dataset:
args_names = (getattr(self.__class__, '_lazy_init') args_names = (getattr(self.__class__, '_lazy_init')
.__code__ .__code__
.co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount]) .co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount])
for key, _ in params.items(): for key in params.keys():
if key in args_names: if key in args_names:
_log_warning(f'{key} keyword has been found in `params` and will be ignored.\n' _log_warning(f'{key} keyword has been found in `params` and will be ignored.\n'
f'Please use {key} argument of the Dataset constructor to pass this parameter.') f'Please use {key} argument of the Dataset constructor to pass this parameter.')
...@@ -1947,7 +1947,7 @@ class Dataset: ...@@ -1947,7 +1947,7 @@ class Dataset:
Returns Returns
------- -------
info : numpy array info : numpy array or None
A numpy array with information from the Dataset. A numpy array with information from the Dataset.
""" """
if self.handle is None: if self.handle is None:
...@@ -2042,7 +2042,7 @@ class Dataset: ...@@ -2042,7 +2042,7 @@ class Dataset:
self.set_categorical_feature(reference.categorical_feature) \ self.set_categorical_feature(reference.categorical_feature) \
.set_feature_name(reference.feature_name) \ .set_feature_name(reference.feature_name) \
._set_predictor(reference._predictor) ._set_predictor(reference._predictor)
# we're done if self and reference share a common upstrem reference # we're done if self and reference share a common upstream reference
if self.get_ref_chain().intersection(reference.get_ref_chain()): if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self return self
if self.data is not None: if self.data is not None:
......
...@@ -129,7 +129,7 @@ def reset_parameter(**kwargs: Union[list, Callable]) -> Callable: ...@@ -129,7 +129,7 @@ def reset_parameter(**kwargs: Union[list, Callable]) -> Callable:
for key, value in kwargs.items(): for key, value in kwargs.items():
if isinstance(value, list): if isinstance(value, list):
if len(value) != env.end_iteration - env.begin_iteration: if len(value) != env.end_iteration - env.begin_iteration:
raise ValueError(f"Length of list {repr(key)} has to equal to 'num_boost_round'.") raise ValueError(f"Length of list {key!r} has to equal to 'num_boost_round'.")
new_param = value[env.iteration - env.begin_iteration] new_param = value[env.iteration - env.begin_iteration]
else: else:
new_param = value(env.iteration - env.begin_iteration) new_param = value(env.iteration - env.begin_iteration)
......
...@@ -82,7 +82,7 @@ def _concat(seq: List[_DaskPart]) -> _DaskPart: ...@@ -82,7 +82,7 @@ def _concat(seq: List[_DaskPart]) -> _DaskPart:
elif isinstance(seq[0], ss.spmatrix): elif isinstance(seq[0], ss.spmatrix):
return ss.vstack(seq, format='csr') return ss.vstack(seq, format='csr')
else: else:
raise TypeError(f'Data must be one of: numpy arrays, pandas dataframes, sparse matrices (from scipy). Got {type(seq[0])}.') raise TypeError(f'Data must be one of: numpy arrays, pandas dataframes, sparse matrices (from scipy). Got {type(seq[0]).__name__}.')
def _remove_list_padding(*args: Any) -> List[List[Any]]: def _remove_list_padding(*args: Any) -> List[List[Any]]:
...@@ -898,7 +898,7 @@ def _predict( ...@@ -898,7 +898,7 @@ def _predict(
**kwargs **kwargs
) )
else: else:
raise TypeError(f'Data must be either Dask Array or Dask DataFrame. Got {type(data)}.') raise TypeError(f'Data must be either Dask Array or Dask DataFrame. Got {type(data).__name__}.')
class _DaskLGBMModel: class _DaskLGBMModel:
......
...@@ -106,7 +106,7 @@ class NumpySequence(lgb.Sequence): ...@@ -106,7 +106,7 @@ class NumpySequence(lgb.Sequence):
raise NotImplementedError("No need to implement, caller will not set step by now") raise NotImplementedError("No need to implement, caller will not set step by now")
return self.ndarray[idx.start:idx.stop] return self.ndarray[idx.start:idx.stop]
else: else:
raise TypeError(f"Sequence Index must be an integer/list/slice, got {type(idx)}") raise TypeError(f"Sequence Index must be an integer/list/slice, got {type(idx).__name__}")
def __len__(self): def __len__(self):
return len(self.ndarray) return len(self.ndarray)
......
...@@ -33,7 +33,7 @@ decreasing_generator = itertools.count(0, -1) ...@@ -33,7 +33,7 @@ decreasing_generator = itertools.count(0, -1)
def custom_asymmetric_obj(y_true, y_pred): def custom_asymmetric_obj(y_true, y_pred):
residual = (y_true - y_pred).astype("float") residual = (y_true - y_pred).astype(np.float64)
grad = np.where(residual < 0, -2 * 10.0 * residual, -2 * residual) grad = np.where(residual < 0, -2 * 10.0 * residual, -2 * residual)
hess = np.where(residual < 0, 2 * 10.0, 2.0) hess = np.where(residual < 0, 2 * 10.0, 2.0)
return grad, hess return grad, hess
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment