Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
tianlh
LightGBM-DCU
Commits
3f4ef95b
Commit
3f4ef95b
authored
Jan 10, 2017
by
Guolin Ke
Browse files
fix warning
parent
fb732c34
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
73 additions
and
69 deletions
+73
-69
src/metric/regression_metric.hpp
src/metric/regression_metric.hpp
+25
-25
src/objective/regression_objective.hpp
src/objective/regression_objective.hpp
+48
-44
No files found.
src/metric/regression_metric.hpp
View file @
3f4ef95b
...
@@ -74,9 +74,9 @@ public:
...
@@ -74,9 +74,9 @@ public:
protected:
protected:
/*! \brief delta for Huber loss */
/*! \brief delta for Huber loss */
double
huber_delta_
;
score_t
huber_delta_
;
/*! \brief c for Fair loss */
/*! \brief c for Fair loss */
double
fair_c_
;
score_t
fair_c_
;
private:
private:
/*! \brief Number of data */
/*! \brief Number of data */
...
@@ -126,40 +126,40 @@ public:
...
@@ -126,40 +126,40 @@ public:
/*! \brief Huber loss for regression task */
class HuberLossMetric: public RegressionMetric<HuberLossMetric> {
public:
  /*!
  * \brief Constructor
  * \param config Metric config; reads config.huber_delta
  */
  explicit HuberLossMetric(const MetricConfig& config) :RegressionMetric<HuberLossMetric>(config) {
    // explicit cast: config stores double, member is score_t — avoids an
    // implicit-narrowing-conversion warning
    huber_delta_ = static_cast<score_t>(config.huber_delta);
  }
  /*!
  * \brief Huber loss on one data point: quadratic inside |diff| <= delta,
  *        linear outside
  * \param label True label of this point
  * \param score Predicted score of this point
  * \param delta Huber delta parameter
  * \return Point-wise Huber loss
  */
  inline static score_t LossOnPoint(float label, score_t score, float delta, float) {
    // keep the arithmetic in score_t and use float literals so no value is
    // silently promoted to double and narrowed back (the original warning)
    const score_t diff = score - label;
    if (std::abs(diff) <= delta) {
      return 0.5f * diff * diff;
    } else {
      return delta * (std::abs(diff) - 0.5f * delta);
    }
  }
  inline static const char* Name() {
    return "huber";
  }
};
/*! \brief Fair loss for regression task */
// http://research.microsoft.com/en-us/um/people/zhang/INRIA/Publis/Tutorial-Estim/node24.html
class FairLossMetric: public RegressionMetric<FairLossMetric> {
public:
  /*!
  * \brief Constructor
  * \param config Metric config; reads config.fair_c
  */
  explicit FairLossMetric(const MetricConfig& config) :RegressionMetric<FairLossMetric>(config) {
    // explicit cast double -> score_t to silence the narrowing warning
    fair_c_ = static_cast<score_t>(config.fair_c);
  }
  /*!
  * \brief Fair loss on one data point: c^2 * (|x|/c - log(1 + |x|/c))
  *        rearranged as c*|x| - c^2*log(1 + |x|/c)
  * \param label True label of this point
  * \param score Predicted score of this point
  * \param c Fair-loss c parameter
  * \return Point-wise Fair loss
  */
  inline static score_t LossOnPoint(float label, score_t score, float, float c) {
    // std::fabs and the 1.0f literal keep the computation in single
    // precision, avoiding double->float narrowing warnings
    const score_t x = std::fabs(score - label);
    return c * x - c * c * std::log(1.0f + x / c);
  }
  inline static const char* Name() {
    return "fair";
  }
};
}
// namespace LightGBM
}
// namespace LightGBM
...
...
src/objective/regression_objective.hpp
View file @
3f4ef95b
...
@@ -23,15 +23,15 @@ public:
...
@@ -23,15 +23,15 @@ public:
}
}
void
GetGradients
(
const
score_t
*
score
,
score_t
*
gradients
,
void
GetGradients
(
const
score_t
*
score
,
score_t
*
gradients
,
score_t
*
hessians
)
const
override
{
score_t
*
hessians
)
const
override
{
if
(
weights_
==
nullptr
)
{
if
(
weights_
==
nullptr
)
{
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static)
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
gradients
[
i
]
=
(
score
[
i
]
-
label_
[
i
]);
gradients
[
i
]
=
(
score
[
i
]
-
label_
[
i
]);
hessians
[
i
]
=
1.0
;
hessians
[
i
]
=
1.0
;
}
}
}
else
{
}
else
{
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static)
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
gradients
[
i
]
=
(
score
[
i
]
-
label_
[
i
])
*
weights_
[
i
];
gradients
[
i
]
=
(
score
[
i
]
-
label_
[
i
])
*
weights_
[
i
];
hessians
[
i
]
=
weights_
[
i
];
hessians
[
i
]
=
weights_
[
i
];
...
@@ -52,10 +52,12 @@ private:
...
@@ -52,10 +52,12 @@ private:
const
float
*
weights_
;
const
float
*
weights_
;
};
};
/*!
* \brief L1 regression loss
*/
class
RegressionL1loss
:
public
ObjectiveFunction
{
class
RegressionL1loss
:
public
ObjectiveFunction
{
public:
public:
explicit
RegressionL1loss
(
const
ObjectiveConfig
&
config
)
{}
explicit
RegressionL1loss
(
const
ObjectiveConfig
&
)
{}
  /*! \brief Destructor (no resources to release) */
  ~RegressionL1loss() {}
...
@@ -66,28 +68,28 @@ public:
...
@@ -66,28 +68,28 @@ public:
}
}
void
GetGradients
(
const
score_t
*
score
,
score_t
*
gradients
,
void
GetGradients
(
const
score_t
*
score
,
score_t
*
gradients
,
score_t
*
hessians
)
const
override
{
score_t
*
hessians
)
const
override
{
if
(
weights_
==
nullptr
)
{
if
(
weights_
==
nullptr
)
{
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static)
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
const
double
diff
=
score
[
i
]
-
label_
[
i
];
const
score_t
diff
=
score
[
i
]
-
label_
[
i
];
if
(
diff
>=
0.0
)
{
if
(
diff
>=
0.0
f
)
{
gradients
[
i
]
=
1.0
;
gradients
[
i
]
=
1.0
f
;
}
else
{
}
else
{
gradients
[
i
]
=
-
1.0
;
gradients
[
i
]
=
-
1.0
f
;
}
}
hessians
[
i
]
=
Common
::
ApproximateHessianWithGaussian
(
score
[
i
],
label_
[
i
],
gradients
[
i
]);
hessians
[
i
]
=
static_cast
<
score_t
>
(
Common
::
ApproximateHessianWithGaussian
(
score
[
i
],
label_
[
i
],
gradients
[
i
])
)
;
}
}
}
else
{
}
else
{
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static)
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
const
double
diff
=
score
[
i
]
-
label_
[
i
];
const
score_t
diff
=
score
[
i
]
-
label_
[
i
];
if
(
diff
>=
0.0
)
{
if
(
diff
>=
0.0
f
)
{
gradients
[
i
]
=
weights_
[
i
];
gradients
[
i
]
=
weights_
[
i
];
}
else
{
}
else
{
gradients
[
i
]
=
-
weights_
[
i
];
gradients
[
i
]
=
-
weights_
[
i
];
}
}
hessians
[
i
]
=
Common
::
ApproximateHessianWithGaussian
(
score
[
i
],
label_
[
i
],
gradients
[
i
],
weights_
[
i
]);
hessians
[
i
]
=
static_cast
<
score_t
>
(
Common
::
ApproximateHessianWithGaussian
(
score
[
i
],
label_
[
i
],
gradients
[
i
],
weights_
[
i
])
)
;
}
}
}
}
}
}
...
@@ -105,11 +107,13 @@ private:
...
@@ -105,11 +107,13 @@ private:
const
float
*
weights_
;
const
float
*
weights_
;
};
};
/*!
* \brief Huber regression loss
*/
class
RegressionHuberLoss
:
public
ObjectiveFunction
{
class
RegressionHuberLoss
:
public
ObjectiveFunction
{
public:
public:
explicit
RegressionHuberLoss
(
const
ObjectiveConfig
&
config
)
{
explicit
RegressionHuberLoss
(
const
ObjectiveConfig
&
config
)
{
delta_
=
config
.
huber_delta
;
delta_
=
static_cast
<
score_t
>
(
config
.
huber_delta
)
;
}
}
~
RegressionHuberLoss
()
{
~
RegressionHuberLoss
()
{
...
@@ -122,39 +126,39 @@ public:
...
@@ -122,39 +126,39 @@ public:
}
}
void
GetGradients
(
const
score_t
*
score
,
score_t
*
gradients
,
void
GetGradients
(
const
score_t
*
score
,
score_t
*
gradients
,
score_t
*
hessians
)
const
override
{
score_t
*
hessians
)
const
override
{
if
(
weights_
==
nullptr
)
{
if
(
weights_
==
nullptr
)
{
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static)
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
const
double
diff
=
score
[
i
]
-
label_
[
i
];
const
score_t
diff
=
score
[
i
]
-
label_
[
i
];
if
(
std
::
abs
(
diff
)
<=
delta_
)
{
if
(
std
::
abs
(
diff
)
<=
delta_
)
{
gradients
[
i
]
=
diff
;
gradients
[
i
]
=
diff
;
hessians
[
i
]
=
1.0
;
hessians
[
i
]
=
1.0
f
;
}
else
{
}
else
{
if
(
diff
>=
0.0
)
{
if
(
diff
>=
0.0
f
)
{
gradients
[
i
]
=
delta_
;
gradients
[
i
]
=
delta_
;
}
else
{
}
else
{
gradients
[
i
]
=
-
delta_
;
gradients
[
i
]
=
-
delta_
;
}
}
hessians
[
i
]
=
Common
::
ApproximateHessianWithGaussian
(
score
[
i
],
label_
[
i
],
gradients
[
i
]);
hessians
[
i
]
=
static_cast
<
score_t
>
(
Common
::
ApproximateHessianWithGaussian
(
score
[
i
],
label_
[
i
],
gradients
[
i
])
)
;
}
}
}
}
}
else
{
}
else
{
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static)
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
const
double
diff
=
score
[
i
]
-
label_
[
i
];
const
score_t
diff
=
score
[
i
]
-
label_
[
i
];
if
(
std
::
abs
(
diff
)
<=
delta_
)
{
if
(
std
::
abs
(
diff
)
<=
delta_
)
{
gradients
[
i
]
=
diff
*
weights_
[
i
];
gradients
[
i
]
=
diff
*
weights_
[
i
];
hessians
[
i
]
=
weights_
[
i
];
hessians
[
i
]
=
weights_
[
i
];
}
else
{
}
else
{
if
(
diff
>=
0.0
)
{
if
(
diff
>=
0.0
f
)
{
gradients
[
i
]
=
delta_
*
weights_
[
i
];
gradients
[
i
]
=
delta_
*
weights_
[
i
];
}
else
{
}
else
{
gradients
[
i
]
=
-
delta_
*
weights_
[
i
];
gradients
[
i
]
=
-
delta_
*
weights_
[
i
];
}
}
hessians
[
i
]
=
Common
::
ApproximateHessianWithGaussian
(
score
[
i
],
label_
[
i
],
gradients
[
i
],
weights_
[
i
]);
hessians
[
i
]
=
static_cast
<
score_t
>
(
Common
::
ApproximateHessianWithGaussian
(
score
[
i
],
label_
[
i
],
gradients
[
i
],
weights_
[
i
])
)
;
}
}
}
}
}
}
...
@@ -172,7 +176,7 @@ private:
...
@@ -172,7 +176,7 @@ private:
/*! \brief Pointer of weights */
/*! \brief Pointer of weights */
const
float
*
weights_
;
const
float
*
weights_
;
/*! \brief delta for Huber loss */
/*! \brief delta for Huber loss */
double
delta_
;
score_t
delta_
;
};
};
...
@@ -180,7 +184,7 @@ private:
...
@@ -180,7 +184,7 @@ private:
class
RegressionFairLoss
:
public
ObjectiveFunction
{
class
RegressionFairLoss
:
public
ObjectiveFunction
{
public:
public:
explicit
RegressionFairLoss
(
const
ObjectiveConfig
&
config
)
{
explicit
RegressionFairLoss
(
const
ObjectiveConfig
&
config
)
{
c_
=
config
.
fair_c
;
c_
=
static_cast
<
score_t
>
(
config
.
fair_c
)
;
}
}
  /*! \brief Destructor (no resources to release) */
  ~RegressionFairLoss() {}
...
@@ -192,18 +196,18 @@ public:
...
@@ -192,18 +196,18 @@ public:
}
}
void
GetGradients
(
const
score_t
*
score
,
score_t
*
gradients
,
void
GetGradients
(
const
score_t
*
score
,
score_t
*
gradients
,
score_t
*
hessians
)
const
override
{
score_t
*
hessians
)
const
override
{
if
(
weights_
==
nullptr
)
{
if
(
weights_
==
nullptr
)
{
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static)
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
const
double
x
=
score
[
i
]
-
label_
[
i
];
const
score_t
x
=
score
[
i
]
-
label_
[
i
];
gradients
[
i
]
=
c_
*
x
/
(
std
::
fabs
(
x
)
+
c_
);
gradients
[
i
]
=
c_
*
x
/
(
std
::
fabs
(
x
)
+
c_
);
hessians
[
i
]
=
c_
*
c_
/
((
std
::
fabs
(
x
)
+
c_
)
*
(
std
::
fabs
(
x
)
+
c_
));
hessians
[
i
]
=
c_
*
c_
/
((
std
::
fabs
(
x
)
+
c_
)
*
(
std
::
fabs
(
x
)
+
c_
));
}
}
}
else
{
}
else
{
#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(static)
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
for
(
data_size_t
i
=
0
;
i
<
num_data_
;
++
i
)
{
const
double
x
=
score
[
i
]
-
label_
[
i
];
const
score_t
x
=
score
[
i
]
-
label_
[
i
];
gradients
[
i
]
=
c_
*
x
/
(
std
::
fabs
(
x
)
+
c_
);
gradients
[
i
]
=
c_
*
x
/
(
std
::
fabs
(
x
)
+
c_
);
gradients
[
i
]
*=
weights_
[
i
];
gradients
[
i
]
*=
weights_
[
i
];
hessians
[
i
]
=
c_
*
c_
/
((
std
::
fabs
(
x
)
+
c_
)
*
(
std
::
fabs
(
x
)
+
c_
));
hessians
[
i
]
=
c_
*
c_
/
((
std
::
fabs
(
x
)
+
c_
)
*
(
std
::
fabs
(
x
)
+
c_
));
...
@@ -224,7 +228,7 @@ private:
...
@@ -224,7 +228,7 @@ private:
/*! \brief Pointer of weights */
/*! \brief Pointer of weights */
const
float
*
weights_
;
const
float
*
weights_
;
/*! \brief c for Fair loss */
/*! \brief c for Fair loss */
double
c_
;
score_t
c_
;
};
};
}
// namespace LightGBM
}
// namespace LightGBM
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment