Commit 244db078 authored by Nikita Titov's avatar Nikita Titov Committed by Qiwei Ye
Browse files

return self (#1602)

parent dcf9ad2e
......@@ -64,7 +64,7 @@ print('7th feature name is:', repr(lgb_train.feature_name[6]))
# save model to file
gbm.save_model('model.txt')
# dump model to json (and save to file)
# dump model to JSON (and save to file)
print('Dump model to JSON...')
model_json = gbm.dump_model()
......
This diff is collapsed.
......@@ -125,10 +125,7 @@ def train(params, train_set, num_boost_round=100,
if not isinstance(train_set, Dataset):
raise TypeError("Training only accepts Dataset object")
train_set._update_params(params)
train_set._set_predictor(predictor)
train_set.set_feature_name(feature_name)
train_set.set_categorical_feature(categorical_feature)
train_set._update_params(params)._set_predictor(predictor).set_feature_name(feature_name).set_categorical_feature(categorical_feature)
is_valid_contain_train = False
train_data_name = "training"
......@@ -148,9 +145,7 @@ def train(params, train_set, num_boost_round=100,
continue
if not isinstance(valid_data, Dataset):
raise TypeError("Traninig only accepts Dataset object")
valid_data._update_params(params)
valid_data.set_reference(train_set)
reduced_valid_sets.append(valid_data)
reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set))
if valid_names is not None and len(valid_names) > i:
name_valid_sets.append(valid_names[i])
else:
......@@ -230,8 +225,7 @@ def train(params, train_set, num_boost_round=100,
for dataset_name, eval_name, score, _ in evaluation_result_list:
booster.best_score[dataset_name][eval_name] = score
if not keep_training_booster:
booster.model_from_string(booster.model_to_string(), False)
booster.free_dataset()
booster.model_from_string(booster.model_to_string(), False).free_dataset()
return booster
......@@ -421,10 +415,7 @@ def cv(params, train_set, num_boost_round=100,
predictor = init_model._to_predictor()
else:
predictor = None
train_set._update_params(params)
train_set._set_predictor(predictor)
train_set.set_feature_name(feature_name)
train_set.set_categorical_feature(categorical_feature)
train_set._update_params(params)._set_predictor(predictor).set_feature_name(feature_name).set_categorical_feature(categorical_feature)
if metrics is not None:
params['metric'] = metrics
......
......@@ -23,24 +23,24 @@ def _objective_function_wrapper(func):
Parameters
----------
func: callable
func : callable
Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group):
y_true: array-like of shape = [n_samples]
y_true : array-like of shape = [n_samples]
The target values.
y_pred: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
The predicted values.
group: array-like
group : array-like
Group/query data, used for ranking task.
Returns
-------
new_func: callable
new_func : callable
The new objective function as expected by ``lightgbm.engine.train``.
The signature is ``new_func(preds, dataset)``:
preds: array-like of shape = [n_samples] or shape = [n_samples * n_classes]
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes]
The predicted values.
dataset: ``dataset``
dataset : ``dataset``
The training set from which the labels will be extracted using
``dataset.get_label()``.
"""
......@@ -82,31 +82,31 @@ def _eval_function_wrapper(func):
Parameters
----------
func: callable
func : callable
Expects a callable with following functions:
``func(y_true, y_pred)``,
``func(y_true, y_pred, weight)``
or ``func(y_true, y_pred, weight, group)``
and return (eval_name->str, eval_result->float, is_bigger_better->Bool):
y_true: array-like of shape = [n_samples]
y_true : array-like of shape = [n_samples]
The target values.
y_pred: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
The predicted values.
weight: array_like of shape = [n_samples]
weight : array_like of shape = [n_samples]
The weight of samples.
group: array-like
group : array-like
Group/query data, used for ranking task.
Returns
-------
new_func: callable
new_func : callable
The new eval function as expected by ``lightgbm.engine.train``.
The signature is ``new_func(preds, dataset)``:
preds: array-like of shape = [n_samples] or shape = [n_samples * n_classes]
preds : array-like of shape = [n_samples] or shape = [n_samples * n_classes]
The predicted values.
dataset: ``dataset``
dataset : ``dataset``
The training set from which the labels will be extracted using
``dataset.get_label()``.
"""
......@@ -232,15 +232,15 @@ class LGBMModel(_LGBMModelBase):
``objective(y_true, y_pred) -> grad, hess`` or
``objective(y_true, y_pred, group) -> grad, hess``:
y_true: array-like of shape = [n_samples]
y_true : array-like of shape = [n_samples]
The target values.
y_pred: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
group: array-like
group : array-like
Group/query data, used for ranking task.
grad: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
grad : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the gradient for each sample point.
hess: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
hess : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the second derivative for each sample point.
For multi-class task, the y_pred is group by class_id first, then group by row_id.
......@@ -365,19 +365,19 @@ class LGBMModel(_LGBMModelBase):
Returns (eval_name, eval_result, is_bigger_better) or
list of (eval_name, eval_result, is_bigger_better)
y_true: array-like of shape = [n_samples]
y_true : array-like of shape = [n_samples]
The target values.
y_pred: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
y_pred : array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
The predicted values.
weight: array-like of shape = [n_samples]
weight : array-like of shape = [n_samples]
The weight of samples.
group: array-like
group : array-like
Group/query data, used for ranking task.
eval_name: string
eval_name : string
The name of evaluation.
eval_result: float
eval_result : float
The eval result.
is_bigger_better: bool
is_bigger_better : bool
Is eval result bigger better, e.g. AUC is bigger_better.
For multi-class task, the y_pred is group by class_id first, then group by row_id.
......@@ -434,8 +434,7 @@ class LGBMModel(_LGBMModelBase):
def _construct_dataset(X, y, sample_weight, init_score, group, params):
ret = Dataset(X, label=y, weight=sample_weight, group=group, params=params)
ret.set_init_score(init_score)
return ret
return ret.set_init_score(init_score)
train_set = _construct_dataset(X, y, sample_weight, init_score, group, params)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment