Commit 21487d8a authored by Nikita Titov's avatar Nikita Titov Committed by Guolin Ke
Browse files

[ci][python] updated pep8 to pycodestyle (#1358)

* updated pep8 to pycodestyle

* fixed E722 do not use bare 'except'

* fixed W605 invalid escape sequence '\*'

* fixed W504 line break after binary operator

* ignore W605 invalid escape sequence '\*' in nuget builder

* made pycodestyle happy
parent 7f7bc447
...@@ -30,7 +30,7 @@ install: ...@@ -30,7 +30,7 @@ install:
- ps: $env:LGB_VER = (Get-Content VERSION.txt).trim() - ps: $env:LGB_VER = (Get-Content VERSION.txt).trim()
- conda config --set always_yes yes --set changeps1 no - conda config --set always_yes yes --set changeps1 no
- conda update -q conda - conda update -q conda
- conda create -q -n test-env python=%PYTHON_VERSION% numpy nose scipy scikit-learn pandas matplotlib pep8 pytest - conda create -q -n test-env python=%PYTHON_VERSION% numpy nose scipy scikit-learn pandas matplotlib pytest
- activate test-env - activate test-env
build_script: build_script:
......
...@@ -50,8 +50,8 @@ if [[ ${TASK} == "check-docs" ]]; then ...@@ -50,8 +50,8 @@ if [[ ${TASK} == "check-docs" ]]; then
fi fi
if [[ ${TASK} == "pylint" ]]; then if [[ ${TASK} == "pylint" ]]; then
conda install pep8 conda install pycodestyle
pep8 --ignore=E501 --exclude=./compute,./docs . || exit -1 pycodestyle --ignore=E501,W503 --exclude=./compute,./docs,./.nuget . || exit -1
exit 0 exit 0
fi fi
......
...@@ -8,7 +8,7 @@ from sklearn.metrics import mean_squared_error ...@@ -8,7 +8,7 @@ from sklearn.metrics import mean_squared_error
try: try:
import cPickle as pickle import cPickle as pickle
except: except BaseException:
import pickle import pickle
# load or create your dataset # load or create your dataset
......
...@@ -438,7 +438,7 @@ class _InnerPredictor(object): ...@@ -438,7 +438,7 @@ class _InnerPredictor(object):
elif isinstance(data, list): elif isinstance(data, list):
try: try:
data = np.array(data) data = np.array(data)
except: except BaseException:
raise ValueError('Cannot convert data list to numpy array.') raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, num_iteration, preds, nrow = self.__pred_for_np2d(data, num_iteration,
predict_type) predict_type)
...@@ -446,7 +446,7 @@ class _InnerPredictor(object): ...@@ -446,7 +446,7 @@ class _InnerPredictor(object):
try: try:
warnings.warn('Converting data to scipy sparse matrix.') warnings.warn('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data) csr = scipy.sparse.csr_matrix(data)
except: except BaseException:
raise TypeError('Cannot predict data for type {}'.format(type(data).__name__)) raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
preds, nrow = self.__pred_for_csr(csr, num_iteration, preds, nrow = self.__pred_for_csr(csr, num_iteration,
predict_type) predict_type)
...@@ -712,7 +712,7 @@ class Dataset(object): ...@@ -712,7 +712,7 @@ class Dataset(object):
try: try:
csr = scipy.sparse.csr_matrix(data) csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset) self.__init_from_csr(csr, params_str, ref_dataset)
except: except BaseException:
raise TypeError('Cannot initialize Dataset from {}'.format(type(data).__name__)) raise TypeError('Cannot initialize Dataset from {}'.format(type(data).__name__))
if label is not None: if label is not None:
self.set_label(label) self.set_label(label)
......
...@@ -197,7 +197,7 @@ class LGBMModel(_LGBMModelBase): ...@@ -197,7 +197,7 @@ class LGBMModel(_LGBMModelBase):
Note Note
---- ----
\*\*kwargs is not supported in sklearn, it may cause unexpected issues. \\*\\*kwargs is not supported in sklearn, it may cause unexpected issues.
Attributes Attributes
---------- ----------
...@@ -618,12 +618,12 @@ class LGBMRegressor(LGBMModel, _LGBMRegressorBase): ...@@ -618,12 +618,12 @@ class LGBMRegressor(LGBMModel, _LGBMRegressorBase):
return self return self
base_doc = LGBMModel.fit.__doc__ base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')] + fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')]
base_doc[base_doc.find('eval_init_score :'):]) + base_doc[base_doc.find('eval_init_score :'):])
base_doc = fit.__doc__ base_doc = fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')] + fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')]
'eval_metric : string, list of strings, callable or None, optional (default="l2")\n' + + 'eval_metric : string, list of strings, callable or None, optional (default="l2")\n'
base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):]) + base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):])
class LGBMClassifier(LGBMModel, _LGBMClassifierBase): class LGBMClassifier(LGBMModel, _LGBMClassifierBase):
...@@ -679,9 +679,9 @@ class LGBMClassifier(LGBMModel, _LGBMClassifierBase): ...@@ -679,9 +679,9 @@ class LGBMClassifier(LGBMModel, _LGBMClassifierBase):
return self return self
base_doc = LGBMModel.fit.__doc__ base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')] + fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')]
'eval_metric : string, list of strings, callable or None, optional (default="logloss")\n' + + 'eval_metric : string, list of strings, callable or None, optional (default="logloss")\n'
base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):]) + base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):])
def predict(self, X, raw_score=False, num_iteration=0): def predict(self, X, raw_score=False, num_iteration=0):
class_probs = self.predict_proba(X, raw_score, num_iteration) class_probs = self.predict_proba(X, raw_score, num_iteration)
...@@ -773,12 +773,12 @@ class LGBMRanker(LGBMModel): ...@@ -773,12 +773,12 @@ class LGBMRanker(LGBMModel):
return self return self
base_doc = LGBMModel.fit.__doc__ base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')] + fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')]
base_doc[base_doc.find('eval_init_score :'):]) + base_doc[base_doc.find('eval_init_score :'):])
base_doc = fit.__doc__ base_doc = fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')] + fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')]
'eval_metric : string, list of strings, callable or None, optional (default="ndcg")\n' + + 'eval_metric : string, list of strings, callable or None, optional (default="ndcg")\n'
base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):base_doc.find('early_stopping_rounds :')] + + base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):base_doc.find('early_stopping_rounds :')]
'eval_at : list of int, optional (default=[1])\n' + 'eval_at : list of int, optional (default=[1])\n'
' The evaluation positions of NDCG.\n' + ' The evaluation positions of NDCG.\n'
base_doc[base_doc.find(' early_stopping_rounds :'):]) + base_doc[base_doc.find(' early_stopping_rounds :'):])
...@@ -626,13 +626,11 @@ class TestEngine(unittest.TestCase): ...@@ -626,13 +626,11 @@ class TestEngine(unittest.TestCase):
x2_negatively_correlated_with_y = np.random.random(size=number_of_dpoints) x2_negatively_correlated_with_y = np.random.random(size=number_of_dpoints)
x = np.column_stack((x1_positively_correlated_with_y, x2_negatively_correlated_with_y)) x = np.column_stack((x1_positively_correlated_with_y, x2_negatively_correlated_with_y))
zs = np.random.normal(loc=0.0, scale=0.01, size=number_of_dpoints) zs = np.random.normal(loc=0.0, scale=0.01, size=number_of_dpoints)
y = ( y = (5 * x1_positively_correlated_with_y
5 * x1_positively_correlated_with_y + + np.sin(10 * np.pi * x1_positively_correlated_with_y)
np.sin(10 * np.pi * x1_positively_correlated_with_y) - - 5 * x2_negatively_correlated_with_y
5 * x2_negatively_correlated_with_y - - np.cos(10 * np.pi * x2_negatively_correlated_with_y)
np.cos(10 * np.pi * x2_negatively_correlated_with_y) + + zs)
zs
)
trainset = lgb.Dataset(x, label=y) trainset = lgb.Dataset(x, label=y)
params = { params = {
'min_data': 20, 'min_data': 20,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment