Commit 21487d8a authored by Nikita Titov's avatar Nikita Titov Committed by Guolin Ke
Browse files

[ci][python] updated pep8 to pycodestyle (#1358)

* updated pep8 to pycodestyle

* fixed E722 do not use bare 'except'

* fixed W605 invalid escape sequence '\*'

* fixed W504 line break after binary operator

* ignore W605 invalid escape sequence '\*' in nuget builder

* made pycodestyle happy
parent 7f7bc447
......@@ -30,7 +30,7 @@ install:
- ps: $env:LGB_VER = (Get-Content VERSION.txt).trim()
- conda config --set always_yes yes --set changeps1 no
- conda update -q conda
- conda create -q -n test-env python=%PYTHON_VERSION% numpy nose scipy scikit-learn pandas matplotlib pep8 pytest
- conda create -q -n test-env python=%PYTHON_VERSION% numpy nose scipy scikit-learn pandas matplotlib pytest
- activate test-env
build_script:
......
......@@ -50,8 +50,8 @@ if [[ ${TASK} == "check-docs" ]]; then
fi
if [[ ${TASK} == "pylint" ]]; then
conda install pep8
pep8 --ignore=E501 --exclude=./compute,./docs . || exit -1
conda install pycodestyle
pycodestyle --ignore=E501,W503 --exclude=./compute,./docs,./.nuget . || exit -1
exit 0
fi
......
......@@ -8,7 +8,7 @@ from sklearn.metrics import mean_squared_error
try:
import cPickle as pickle
except:
except BaseException:
import pickle
# load or create your dataset
......
......@@ -438,7 +438,7 @@ class _InnerPredictor(object):
elif isinstance(data, list):
try:
data = np.array(data)
except:
except BaseException:
raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, num_iteration,
predict_type)
......@@ -446,7 +446,7 @@ class _InnerPredictor(object):
try:
warnings.warn('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data)
except:
except BaseException:
raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
preds, nrow = self.__pred_for_csr(csr, num_iteration,
predict_type)
......@@ -712,7 +712,7 @@ class Dataset(object):
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except:
except BaseException:
raise TypeError('Cannot initialize Dataset from {}'.format(type(data).__name__))
if label is not None:
self.set_label(label)
......
......@@ -197,7 +197,7 @@ class LGBMModel(_LGBMModelBase):
Note
----
\*\*kwargs is not supported in sklearn, it may cause unexpected issues.
\\*\\*kwargs is not supported in sklearn, it may cause unexpected issues.
Attributes
----------
......@@ -618,12 +618,12 @@ class LGBMRegressor(LGBMModel, _LGBMRegressorBase):
return self
base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')] +
base_doc[base_doc.find('eval_init_score :'):])
fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')]
+ base_doc[base_doc.find('eval_init_score :'):])
base_doc = fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')] +
'eval_metric : string, list of strings, callable or None, optional (default="l2")\n' +
base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):])
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')]
+ 'eval_metric : string, list of strings, callable or None, optional (default="l2")\n'
+ base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):])
class LGBMClassifier(LGBMModel, _LGBMClassifierBase):
......@@ -679,9 +679,9 @@ class LGBMClassifier(LGBMModel, _LGBMClassifierBase):
return self
base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')] +
'eval_metric : string, list of strings, callable or None, optional (default="logloss")\n' +
base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):])
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')]
+ 'eval_metric : string, list of strings, callable or None, optional (default="logloss")\n'
+ base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):])
def predict(self, X, raw_score=False, num_iteration=0):
class_probs = self.predict_proba(X, raw_score, num_iteration)
......@@ -773,12 +773,12 @@ class LGBMRanker(LGBMModel):
return self
base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')] +
base_doc[base_doc.find('eval_init_score :'):])
fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')]
+ base_doc[base_doc.find('eval_init_score :'):])
base_doc = fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')] +
'eval_metric : string, list of strings, callable or None, optional (default="ndcg")\n' +
base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):base_doc.find('early_stopping_rounds :')] +
'eval_at : list of int, optional (default=[1])\n'
' The evaluation positions of NDCG.\n' +
base_doc[base_doc.find(' early_stopping_rounds :'):])
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')]
+ 'eval_metric : string, list of strings, callable or None, optional (default="ndcg")\n'
+ base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):base_doc.find('early_stopping_rounds :')]
+ 'eval_at : list of int, optional (default=[1])\n'
' The evaluation positions of NDCG.\n'
+ base_doc[base_doc.find(' early_stopping_rounds :'):])
......@@ -626,13 +626,11 @@ class TestEngine(unittest.TestCase):
x2_negatively_correlated_with_y = np.random.random(size=number_of_dpoints)
x = np.column_stack((x1_positively_correlated_with_y, x2_negatively_correlated_with_y))
zs = np.random.normal(loc=0.0, scale=0.01, size=number_of_dpoints)
y = (
5 * x1_positively_correlated_with_y +
np.sin(10 * np.pi * x1_positively_correlated_with_y) -
5 * x2_negatively_correlated_with_y -
np.cos(10 * np.pi * x2_negatively_correlated_with_y) +
zs
)
y = (5 * x1_positively_correlated_with_y
+ np.sin(10 * np.pi * x1_positively_correlated_with_y)
- 5 * x2_negatively_correlated_with_y
- np.cos(10 * np.pi * x2_negatively_correlated_with_y)
+ zs)
trainset = lgb.Dataset(x, label=y)
params = {
'min_data': 20,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment