Commit 04ede377 authored by zhangyafeikimi, committed by Qiwei Ye

optimize performance (#363)

parent d5bbaa26
@@ -100,7 +100,7 @@ class MultiErrorMetric: public MulticlassMetric<MultiErrorMetric> {
 public:
   explicit MultiErrorMetric(const MetricConfig& config) :MulticlassMetric<MultiErrorMetric>(config) {}
-  inline static double LossOnPoint(float label, std::vector<double> score) {
+  inline static double LossOnPoint(float label, std::vector<double>& score) {
     size_t k = static_cast<size_t>(label);
     for (size_t i = 0; i < score.size(); ++i){
       if (i != k && score[i] >= score[k]) {
@@ -120,7 +120,7 @@ class MultiLoglossMetric: public MulticlassMetric<MultiLoglossMetric> {
 public:
   explicit MultiLoglossMetric(const MetricConfig& config) :MulticlassMetric<MultiLoglossMetric>(config) {}
-  inline static double LossOnPoint(float label, std::vector<double> score) {
+  inline static double LossOnPoint(float label, std::vector<double>& score) {
     size_t k = static_cast<size_t>(label);
     Common::Softmax(&score);
     if (score[k] > kEpsilon) {
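Both metric classes now take the per-point score vector by reference instead of by value, so the vector of num_class_ doubles is no longer copied on every call to LossOnPoint; in MultiLoglossMetric this also means Common::Softmax(&score) now transforms the caller's buffer in place, which appears safe as long as the caller rebuilds that buffer for each data point. Below is a minimal standalone sketch of the difference, with hypothetical names; it is not the LightGBM source.

// Sketch of the by-value vs. by-reference signatures (hypothetical names).
#include <cstdio>
#include <vector>

// Old form: the score vector is copied for every data point.
static double LossByValue(float label, std::vector<double> score) {
  return score[static_cast<size_t>(label)];
}

// New form: the caller's buffer is used in place, no per-call copy.
// Any in-place transform (e.g. a softmax) now modifies the caller's buffer.
static double LossByRef(float label, std::vector<double>& score) {
  return score[static_cast<size_t>(label)];
}

int main() {
  std::vector<double> rec = {0.1, 0.7, 0.2};  // raw scores for one data point
  std::printf("%f %f\n", LossByValue(1.0f, rec), LossByRef(1.0f, rec));
  return 0;
}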
@@ -48,9 +48,10 @@ public:
   void GetGradients(const double* score, score_t* gradients, score_t* hessians) const override {
     if (weights_ == nullptr) {
-      #pragma omp parallel for schedule(static)
+      std::vector<double> rec;
+      #pragma omp parallel for schedule(static) private(rec)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        std::vector<double> rec(num_class_);
+        rec.resize(num_class_);
         for (int k = 0; k < num_class_; ++k){
           size_t idx = static_cast<size_t>(num_data_) * k + i;
           rec[k] = static_cast<double>(score[idx]);
@@ -69,9 +70,10 @@ public:
         }
       }
     } else {
-      #pragma omp parallel for schedule(static)
+      std::vector<double> rec;
+      #pragma omp parallel for schedule(static) private(rec)
       for (data_size_t i = 0; i < num_data_; ++i) {
-        std::vector<double> rec(num_class_);
+        rec.resize(num_class_);
         for (int k = 0; k < num_class_; ++k){
           size_t idx = static_cast<size_t>(num_data_) * k + i;
           rec[k] = static_cast<double>(score[idx]);
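In GetGradients, the per-point scratch vector rec is hoisted out of the OpenMP loop: declaring it before the loop and marking it private(rec) gives each thread its own default-constructed copy, and rec.resize(num_class_) inside the loop only allocates on a thread's first iteration, so the previous heap allocation and deallocation per data point disappear. The following is a minimal standalone sketch of that pattern; the loop body is a placeholder, not LightGBM's gradient computation.

// Standalone sketch of the private(rec) allocation-hoisting pattern.
#include <cstdio>
#include <vector>

int main() {
  const int num_data = 1000;
  const int num_class = 5;
  std::vector<double> out(num_data, 0.0);

  std::vector<double> rec;  // scratch buffer, one private copy per OpenMP thread
  #pragma omp parallel for schedule(static) private(rec)
  for (int i = 0; i < num_data; ++i) {
    rec.resize(num_class);  // allocates once per thread, then a no-op
    for (int k = 0; k < num_class; ++k) {
      rec[k] = static_cast<double>(i + k);
    }
    out[i] = rec[0] + rec[num_class - 1];  // each iteration writes its own slot
  }
  std::printf("out[last] = %f\n", out[num_data - 1]);
  return 0;
}

Compared with constructing std::vector<double> rec(num_class_) inside the loop, the resize of an already-sized vector does no work, which is where the speedup in this commit comes from.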