# callback.py
# coding: utf-8
# pylint: disable = invalid-name, W0105
from __future__ import absolute_import
import collections

class EarlyStopException(Exception):
    """Raised by a callback to signal that training should stop early.

    Attributes
    ----------
    best_iteration : int
        Index of the best iteration seen before stopping.
    """
    def __init__(self, best_iteration):
        """Store the best iteration and initialize the base Exception."""
        super(EarlyStopException, self).__init__()
        self.best_iteration = best_iteration

# Callback environment used by callbacks
# Read-only snapshot of training state passed to each callback every round.
CallbackEnv = collections.namedtuple(
    "LightGBMCallbackEnv",
    ["model",                    # Booster being trained; may be None (early_stop guards for it)
     "cvfolds",                  # CV folds; not read by any callback in this file
     "iteration",                # current boosting round index
     "begin_iteration",          # first round index of this training run
     "end_iteration",            # total number of rounds (compared to len(learning_rates))
     "evaluation_result_list"])  # tuples (data_name, eval_name, result, is_higher_better[, stdv])

def _format_eval_result(value, show_stdv=True):
    """format metric string"""
    if len(value) == 4:
        return '%s\'s %s:%g' % (value[0], value[1], value[2])
    elif len(value) == 5:
        if show_stdv:
            return '%s\'s %s:%g+%g' % (value[0], value[1], value[2], value[4])
        else:
            return '%s\'s %s:%g' % (value[0], value[1], value[2])
    else:
        raise ValueError("wrong metric value")


def print_evaluation(period=1, show_stdv=True):
    """Create a callback that prints evaluation results.

    Parameters
    ----------
    period : int
        Print every ``period`` iterations; a non-positive value disables
        printing entirely.

    show_stdv : bool, optional
        Whether to show the stdv when it is available.

    Returns
    -------
    callback : function
        A callback that prints evaluation every period iterations.
    """
    def callback(env):
        """internal function"""
        # Guard first: a non-positive period would also make the modulo
        # below raise ZeroDivisionError.
        if period <= 0 or not env.evaluation_result_list:
            return
        if env.iteration % period != 0 and env.iteration + 1 != env.begin_iteration:
            return
        pieces = [_format_eval_result(item, show_stdv)
                  for item in env.evaluation_result_list]
        print('[%d]\t%s' % (env.iteration, '\t'.join(pieces)))
    return callback


def record_evaluation(eval_result):
    """Create a callback that records the evaluation history into ``eval_result``.

    Parameters
    ----------
    eval_result : dict
       A dictionary to store the evaluation results.

    Returns
    -------
    callback : function
        The requested callback function.

    Raises
    ------
    TypeError
        If ``eval_result`` is not a dictionary.
    """
    if not isinstance(eval_result, dict):
        raise TypeError('eval_result has to be a dictionary')
    eval_result.clear()

    def init(env):
        """Create one defaultdict(list) slot per dataset name."""
        # Index instead of tuple-unpacking: entries are 4-tuples for plain
        # training but 5-tuples (with stdv) for cross-validation, as shown
        # by _format_eval_result's len(value) == 5 branch.
        for item in env.evaluation_result_list:
            eval_result.setdefault(item[0], collections.defaultdict(list))

    def callback(env):
        """internal function"""
        if not eval_result:
            init(env)
        for item in env.evaluation_result_list:
            data_name, eval_name, result = item[0], item[1], item[2]
            eval_result[data_name][eval_name].append(result)
    return callback


def reset_learning_rate(learning_rates):
    """Reset learning rate after first iteration

    NOTE: the initial learning rate will still take in-effect on first iteration.

    Parameters
    ----------
    learning_rates: list or function
        List of learning rate for each boosting round
        or a customized function that calculates learning_rate in terms of
        current number of round and the total number of boosting round
        (e.g. yields learning rate decay)
        - list l: learning_rate = l[current_round]
        - function f: learning_rate = f(current_round, total_boost_round)

    Returns
    -------
    callback : function
        The requested callback function.
    """
    def callback(env):
        """internal function"""
        round_idx = env.iteration
        if isinstance(learning_rates, list):
            if len(learning_rates) != env.end_iteration:
                raise ValueError("Length of list 'learning_rates' has to equal 'num_boost_round'.")
            new_rate = learning_rates[round_idx]
        else:
            new_rate = learning_rates(round_idx, env.end_iteration)
        env.model.reset_parameter({'learning_rate': new_rate})
    # Run before each iteration so the new rate applies to that round.
    callback.before_iteration = True
    return callback


def early_stop(stopping_rounds, verbose=True):
    """Create a callback that activates early stopping.

    Requires at least one validation set and one metric; when several are
    present, every one of them is checked.

    Parameters
    ----------
    stopping_rounds : int
       Number of rounds without improvement after which training stops.

    verbose : optional, bool
        Whether to print message about early stopping information.

    Returns
    -------
    callback : function
        The requested callback function.
    """
    # State shared between invocations, keyed by metric position in
    # env.evaluation_result_list.
    factor_to_bigger_better = {}
    best_score = {}
    best_iter = {}
    best_msg = {}

    def init(env):
        """internal function"""
        if not env.evaluation_result_list:
            raise ValueError('For early stopping you need at least one set in evals.')
        if verbose:
            print("Train until valid scores didn't improve in {} rounds.".format(stopping_rounds))
        for idx, item in enumerate(env.evaluation_result_list):
            best_score[idx] = float('-inf')
            best_iter[idx] = 0
            if verbose:
                best_msg[idx] = ""
            # item[3] is True when a larger metric value is better; flip the
            # sign otherwise so every metric can be treated as maximized.
            factor_to_bigger_better[idx] = 1.0 if item[3] else -1.0

    def callback(env):
        """internal function"""
        if not best_score:
            init(env)
        for idx in range(len(env.evaluation_result_list)):
            score = env.evaluation_result_list[idx][2] * factor_to_bigger_better[idx]
            if score > best_score[idx]:
                best_score[idx] = score
                best_iter[idx] = env.iteration
                if verbose:
                    results = '\t'.join(_format_eval_result(x) for x in env.evaluation_result_list)
                    best_msg[idx] = '[%d]\t%s' % (env.iteration, results)
            elif env.iteration - best_iter[idx] >= stopping_rounds:
                if env.model is not None:
                    env.model.set_attr(best_iteration=str(best_iter[idx]))
                if verbose:
                    print('early stopping, best iteration is:')
                    print(best_msg[idx])
                raise EarlyStopException(best_iter[idx])
    return callback