tianlh / LightGBM-DCU

Commit 6219df7a
Authored Jan 09, 2017 by Tsukasa OMOTO; committed by Guolin Ke, Jan 09, 2017

Add L1 objective function (#175)

* Add L1 objective function
* fix hessians
* update

Parent: a6f47d00
Changes: 4 files with 81 additions and 3 deletions (+81, -3)
* docs/Parameters.md (+4, -2)
* include/LightGBM/utils/common.h (+18, -0)
* src/objective/objective_function.cpp (+4, -1)
* src/objective/regression_objective.hpp (+55, -0)
docs/Parameters.md

````
@@ -16,9 +16,11 @@ The parameter format is ```key1=value1 key2=value2 ... ``` . And parameters can
 * ```task```, default=```train```, type=enum, options=```train```, ```prediction```
   * ```train``` for training
   * ```prediction``` for prediction.
-* ```application```, default=```regression```, type=enum, options=```regression```, ```huber```, ```binary```, ```lambdarank```, ```multiclass```, alias=```objective```, ```app```
+* ```application```, default=```regression```, type=enum, options=```regression```, ```regression_l1```, ```huber```, ```binary```, ```lambdarank```, ```multiclass```, alias=```objective```, ```app```
   * ```regression```, regression application
-  * ```huber```, [Huber loss](https://en.wikipedia.org/wiki/Huber_loss "Huber loss - Wikipedia") for regression task
+  * ```regression_l2```, L2 loss, alias=```mean_squared_error```, ```mse```
+  * ```regression_l1```, L1 loss, alias=```mean_absolute_error```, ```mae```
+  * ```huber```, [Huber loss](https://en.wikipedia.org/wiki/Huber_loss "Huber loss - Wikipedia")
   * ```binary```, binary classification application
   * ```lambdarank```, lambdarank application
   * ```multiclass```, multi-class classification application, should set ```num_class``` as well
````
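With this change the L1 objective can be selected straight from a parameter file in the documented ```key1=value1 key2=value2``` format. A minimal sketch; the ```data``` key is assumed from the rest of Parameters.md and is not part of this hunk:

```
task=train
application=regression_l1
data=train.tsv
```

Through the aliases documented above, ```objective=mae``` selects the same loss.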
include/LightGBM/utils/common.h

```
@@ -400,6 +400,24 @@ inline void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t s
   }
 }
 
+/*
+* approximate hessians of absolute loss with Gaussian function
+* cf. https://en.wikipedia.org/wiki/Gaussian_function
+*
+* y is a prediction.
+* t means the true target.
+* w means weights.
+*/
+inline static double ApproximateHessianWithGaussian(double y, double t, double w = 1.0f) {
+  const double diff = y - t;
+  const double pi = M_PI;
+  const double x = (std::fabs(diff) > 0.0) ? std::fabs(diff) : 1.0e-6;  // avoid a zero-width peak when prediction equals target
+  const double a = 2.0 * w;  // difference of two first derivatives, (zero to inf) and (zero to -inf)
+  const double b = 0.0;
+  const double c = (std::fabs(y) + std::fabs(t)) / 1.0e3;
+  return w * std::exp(-(x - b) * (x - b) / (2.0 * c * c)) * a / (std::sqrt(2.0 * pi) * c);
+}
+
 }  // namespace Common
 
 }  // namespace LightGBM
```
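The returned value is the standard Gaussian $f(x) = a\,e^{-(x-b)^2/(2c^2)}$ from the referenced Wikipedia article, normalized by $\sqrt{2\pi}\,c$ and scaled by the weight; it stands in for the second derivative of the absolute loss, which is a delta spike and unusable as a hessian. Reading the constants off the code:

$$
h(y,t) = w \cdot \frac{a}{\sqrt{2\pi}\,c}\,\exp\!\left(-\frac{(x-b)^2}{2c^2}\right),
\qquad a = 2w,\quad b = 0,\quad c = \frac{\lvert y\rvert + \lvert t\rvert}{10^3},
$$

where $x = \lvert y - t\rvert$, replaced by $10^{-6}$ when the difference is exactly zero.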
src/objective/objective_function.cpp

```
@@ -7,8 +7,11 @@
 namespace LightGBM {
 
 ObjectiveFunction* ObjectiveFunction::CreateObjectiveFunction(const std::string& type, const ObjectiveConfig& config) {
-  if (type == std::string("regression")) {
+  if (type == std::string("regression") || type == std::string("regression_l2")
+      || type == std::string("mean_squared_error") || type == std::string("mse")) {
     return new RegressionL2loss(config);
+  } else if (type == std::string("regression_l1") || type == std::string("mean_absolute_error") || type == std::string("mae")) {
+    return new RegressionL1loss(config);
   } else if (type == std::string("huber")) {
     return new RegressionLHuberLoss(config);
   } else if (type == std::string("binary")) {
```
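A quick sketch of the dispatch this enables; the `ObjectiveConfig` setup is schematic, since its construction lives outside this diff:

```cpp
#include <LightGBM/objective_function.h>

#include <memory>
#include <string>

void Example(const LightGBM::ObjectiveConfig& config) {
  // Any of the new aliases routes to the same L1 objective.
  std::unique_ptr<LightGBM::ObjectiveFunction> obj(
      LightGBM::ObjectiveFunction::CreateObjectiveFunction(std::string("mae"), config));
  // obj->GetName() now reports "regression_l1".
}
```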
src/objective/regression_objective.hpp

```
@@ -2,6 +2,7 @@
 #define LIGHTGBM_OBJECTIVE_REGRESSION_OBJECTIVE_HPP_
 
 #include <LightGBM/objective_function.h>
+#include <LightGBM/utils/common.h>
 
 namespace LightGBM {
 /*!
@@ -51,6 +52,60 @@ private:
   const float* weights_;
 };
 
+class RegressionL1loss: public ObjectiveFunction {
+public:
+  explicit RegressionL1loss(const ObjectiveConfig& config) {
+  }
+
+  ~RegressionL1loss() {
+  }
+
+  void Init(const Metadata& metadata, data_size_t num_data) override {
+    num_data_ = num_data;
+    label_ = metadata.label();
+    weights_ = metadata.weights();
+  }
+
+  void GetGradients(const score_t* score, score_t* gradients, score_t* hessians) const override {
+    if (weights_ == nullptr) {
+      #pragma omp parallel for schedule(static)
+      for (data_size_t i = 0; i < num_data_; ++i) {
+        const double diff = score[i] - label_[i];
+        if (diff >= 0.0) {
+          gradients[i] = 1.0;
+        } else {
+          gradients[i] = -1.0;
+        }
+        hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i]);
+      }
+    } else {
+      #pragma omp parallel for schedule(static)
+      for (data_size_t i = 0; i < num_data_; ++i) {
+        const double diff = score[i] - label_[i];
+        if (diff >= 0.0) {
+          gradients[i] = weights_[i];
+        } else {
+          gradients[i] = -weights_[i];
+        }
+        hessians[i] = Common::ApproximateHessianWithGaussian(score[i], label_[i], weights_[i]);
+      }
+    }
+  }
+
+  const char* GetName() const override {
+    return "regression_l1";
+  }
+
+private:
+  /*! \brief Number of data */
+  data_size_t num_data_;
+  /*! \brief Pointer of label */
+  const float* label_;
+  /*! \brief Pointer of weights */
+  const float* weights_;
+};
+
 class RegressionLHuberLoss: public ObjectiveFunction {
 public:
   explicit RegressionLHuberLoss(const ObjectiveConfig& config) {
```
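The two `GetGradients` branches implement the (sub)gradient of the weighted absolute loss, taking $\operatorname{sign}(0) = +1$ via the `diff >= 0.0` test; its distributional second derivative is exactly the delta spike that `Common::ApproximateHessianWithGaussian` smooths over, which also accounts for the factor $a = 2w$ there:

$$
\frac{\partial}{\partial s}\, w\,\lvert s - t\rvert = w\,\operatorname{sign}(s - t),
\qquad
\frac{\partial^2}{\partial s^2}\, w\,\lvert s - t\rvert = 2w\,\delta(s - t).
$$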