Unverified Commit 04e2356b authored by Nikita Titov's avatar Nikita Titov Committed by GitHub
Browse files

[python][docs] several little fixes (#1533)

parent c6cdea75
......@@ -1097,7 +1097,7 @@ class Dataset(object):
def _set_predictor(self, predictor):
"""
Set predictor for continued training, not recommand for user to call this function.
Set predictor for continued training, not recommended for user to call this function.
Please set init_model in engine.train or engine.cv
"""
if predictor is self._predictor:
......
......@@ -45,7 +45,7 @@ def train(params, train_set, num_boost_round=100,
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
Note: should return (eval_name, eval_result, is_higher_better) or list of such tuples.
To ignore the default metric in params, set it to the string ``"None"``
init_model : string or None, optional (default=None)
init_model : string, Booster or None, optional (default=None)
Filename of LightGBM model or Booster instance used for continue training.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
......@@ -268,7 +268,7 @@ def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratifi
raise LightGBMError('Scikit-learn is required for lambdarank cv.')
# lambdarank task, split according to groups
group_info = full_data.get_group().astype(int)
flatted_group = np.repeat(range(len(group_info)), repeats=group_info)
flatted_group = np.repeat(range_(len(group_info)), repeats=group_info)
group_kfold = _LGBMGroupKFold(n_splits=nfold)
folds = group_kfold.split(X=np.zeros(num_data), groups=flatted_group)
elif stratified:
......@@ -352,7 +352,7 @@ def cv(params, train_set, num_boost_round=100,
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
Note: should return (eval_name, eval_result, is_higher_better) or list of such tuples.
To ignore the default metric in params, set it to the string ``"None"``
init_model : string or None, optional (default=None)
init_model : string, Booster or None, optional (default=None)
Filename of LightGBM model or Booster instance used for continue training.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
......
......@@ -10,7 +10,7 @@ from io import BytesIO
import numpy as np
from .basic import Booster
from .compat import MATPLOTLIB_INSTALLED, GRAPHVIZ_INSTALLED, string_type
from .compat import MATPLOTLIB_INSTALLED, GRAPHVIZ_INSTALLED, range_, string_type
from .sklearn import LGBMModel
......@@ -216,7 +216,7 @@ def plot_metric(booster, metric=None, dataset_names=None,
raise KeyError('No given metric in eval results.')
results = metrics_for_one[metric]
num_iteration, max_result, min_result = len(results), max(results), min(results)
x_ = range(num_iteration)
x_ = range_(num_iteration)
ax.plot(x_, results, label=name)
for name in dataset_names:
......
......@@ -193,7 +193,7 @@ class LGBMModel(_LGBMModelBase):
Number of parallel threads.
silent : bool, optional (default=True)
Whether to print messages while running boosting.
importance_type : str, optional (default='split')
importance_type : string, optional (default='split')
The type of feature importance to be filled into ``feature_importances_``.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment