Unverified Commit 4f47547c authored by James Lamb, committed by GitHub
Browse files

[CUDA] consolidate CUDA versions (#5677)



* [ci] speed up if-else, swig, and lint conda setup

* add 'source activate'

* python constraint

* start removing cuda v1

* comment out CI

* remove more references

* revert some unnecessary changes

* revert a few more mistakes

* revert another change that ignored params

* sigh

* remove CUDATreeLearner

* fix tests, docs

* fix quoting in setup.py

* restore all CI

* Apply suggestions from code review
Co-authored-by: shiyu1994 <shiyu_k1994@qq.com>

* Apply suggestions from code review

* completely remove cuda_exp, update docs

---------
Co-authored-by: shiyu1994 <shiyu_k1994@qq.com>
parent 5ffd7571
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include "cuda_binary_objective.hpp"
......@@ -61,4 +61,4 @@ void CUDABinaryLogloss::Init(const Metadata& metadata, data_size_t num_data) {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include <algorithm>
......@@ -206,4 +206,4 @@ void CUDABinaryLogloss::LaunchResetOVACUDALabelKernel() const {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -7,7 +7,7 @@
#ifndef LIGHTGBM_OBJECTIVE_CUDA_CUDA_BINARY_OBJECTIVE_HPP_
#define LIGHTGBM_OBJECTIVE_CUDA_CUDA_BINARY_OBJECTIVE_HPP_
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#define GET_GRADIENTS_BLOCK_SIZE_BINARY (1024)
#define CALC_INIT_SCORE_BLOCK_SIZE_BINARY (1024)
......@@ -58,6 +58,6 @@ class CUDABinaryLogloss : public CUDAObjectiveInterface<BinaryLogloss> {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
#endif // LIGHTGBM_OBJECTIVE_CUDA_CUDA_BINARY_OBJECTIVE_HPP_
......@@ -3,7 +3,7 @@
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include "cuda_multiclass_objective.hpp"
......@@ -59,4 +59,4 @@ const double* CUDAMulticlassOVA::ConvertOutputCUDA(const data_size_t num_data, c
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -3,7 +3,7 @@
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include <algorithm>
......@@ -105,4 +105,4 @@ const double* CUDAMulticlassSoftmax::LaunchConvertOutputCUDAKernel(
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -5,7 +5,7 @@
#ifndef LIGHTGBM_OBJECTIVE_CUDA_CUDA_MULTICLASS_OBJECTIVE_HPP_
#define LIGHTGBM_OBJECTIVE_CUDA_CUDA_MULTICLASS_OBJECTIVE_HPP_
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include <LightGBM/cuda/cuda_objective_function.hpp>
......@@ -74,5 +74,5 @@ class CUDAMulticlassOVA: public CUDAObjectiveInterface<MulticlassOVA> {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
#endif // LIGHTGBM_OBJECTIVE_CUDA_CUDA_MULTICLASS_OBJECTIVE_HPP_
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include <string>
#include <vector>
......@@ -64,4 +64,4 @@ void CUDARankXENDCG::GenerateItemRands() const {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include "cuda_rank_objective.hpp"
......@@ -658,4 +658,4 @@ void CUDARankXENDCG::LaunchGetGradientsKernel(const double* score, score_t* grad
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -7,7 +7,7 @@
#ifndef LIGHTGBM_OBJECTIVE_CUDA_CUDA_RANK_OBJECTIVE_HPP_
#define LIGHTGBM_OBJECTIVE_CUDA_CUDA_RANK_OBJECTIVE_HPP_
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#define NUM_QUERY_PER_BLOCK (10)
......@@ -118,5 +118,5 @@ class CUDARankXENDCG : public CUDALambdaRankObjectiveInterface<RankXENDCG> {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
#endif // LIGHTGBM_OBJECTIVE_CUDA_CUDA_RANK_OBJECTIVE_HPP_
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include "cuda_regression_objective.hpp"
......@@ -85,4 +85,4 @@ double CUDARegressionPoissonLoss::LaunchCalcInitScoreKernel(const int class_id)
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include "cuda_regression_objective.hpp"
#include <LightGBM/cuda/cuda_algorithms.hpp>
......@@ -353,4 +353,4 @@ const double* CUDARegressionPoissonLoss::LaunchConvertOutputCUDAKernel(const dat
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -7,7 +7,7 @@
#ifndef LIGHTGBM_OBJECTIVE_CUDA_CUDA_REGRESSION_OBJECTIVE_HPP_
#define LIGHTGBM_OBJECTIVE_CUDA_CUDA_REGRESSION_OBJECTIVE_HPP_
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#define GET_GRADIENTS_BLOCK_SIZE_REGRESSION (1024)
......@@ -135,5 +135,5 @@ class CUDARegressionPoissonLoss : public CUDARegressionObjectiveInterface<Regres
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
#endif // LIGHTGBM_OBJECTIVE_CUDA_CUDA_REGRESSION_OBJECTIVE_HPP_
......@@ -18,8 +18,8 @@
namespace LightGBM {
ObjectiveFunction* ObjectiveFunction::CreateObjectiveFunction(const std::string& type, const Config& config) {
#ifdef USE_CUDA_EXP
if (config.device_type == std::string("cuda_exp") &&
#ifdef USE_CUDA
if (config.device_type == std::string("cuda") &&
config.data_sample_strategy != std::string("goss") &&
config.boosting != std::string("rf")) {
if (type == std::string("regression")) {
......@@ -27,7 +27,7 @@ ObjectiveFunction* ObjectiveFunction::CreateObjectiveFunction(const std::string&
} else if (type == std::string("regression_l1")) {
return new CUDARegressionL1loss(config);
} else if (type == std::string("quantile")) {
Log::Warning("Objective quantile is not implemented in cuda_exp version. Fall back to boosting on CPU.");
Log::Warning("Objective quantile is not implemented in cuda version. Fall back to boosting on CPU.");
return new RegressionQuantileloss(config);
} else if (type == std::string("huber")) {
return new CUDARegressionHuberLoss(config);
......@@ -46,26 +46,26 @@ ObjectiveFunction* ObjectiveFunction::CreateObjectiveFunction(const std::string&
} else if (type == std::string("multiclassova")) {
return new CUDAMulticlassOVA(config);
} else if (type == std::string("cross_entropy")) {
Log::Warning("Objective cross_entropy is not implemented in cuda_exp version. Fall back to boosting on CPU.");
Log::Warning("Objective cross_entropy is not implemented in cuda version. Fall back to boosting on CPU.");
return new CrossEntropy(config);
} else if (type == std::string("cross_entropy_lambda")) {
Log::Warning("Objective cross_entropy_lambda is not implemented in cuda_exp version. Fall back to boosting on CPU.");
Log::Warning("Objective cross_entropy_lambda is not implemented in cuda version. Fall back to boosting on CPU.");
return new CrossEntropyLambda(config);
} else if (type == std::string("mape")) {
Log::Warning("Objective mape is not implemented in cuda_exp version. Fall back to boosting on CPU.");
Log::Warning("Objective mape is not implemented in cuda version. Fall back to boosting on CPU.");
return new RegressionMAPELOSS(config);
} else if (type == std::string("gamma")) {
Log::Warning("Objective gamma is not implemented in cuda_exp version. Fall back to boosting on CPU.");
Log::Warning("Objective gamma is not implemented in cuda version. Fall back to boosting on CPU.");
return new RegressionGammaLoss(config);
} else if (type == std::string("tweedie")) {
Log::Warning("Objective tweedie is not implemented in cuda_exp version. Fall back to boosting on CPU.");
Log::Warning("Objective tweedie is not implemented in cuda version. Fall back to boosting on CPU.");
return new RegressionTweedieLoss(config);
} else if (type == std::string("custom")) {
Log::Warning("Using customized objective with cuda_exp. This requires copying gradients from CPU to GPU, which can be slow.");
Log::Warning("Using customized objective with cuda. This requires copying gradients from CPU to GPU, which can be slow.");
return nullptr;
}
} else {
#endif // USE_CUDA_EXP
#endif // USE_CUDA
if (type == std::string("regression")) {
return new RegressionL2loss(config);
} else if (type == std::string("regression_l1")) {
......@@ -101,9 +101,9 @@ ObjectiveFunction* ObjectiveFunction::CreateObjectiveFunction(const std::string&
} else if (type == std::string("custom")) {
return nullptr;
}
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
}
#endif // USE_CUDA_EXP
#endif // USE_CUDA
Log::Fatal("Unknown objective type name: %s", type.c_str());
return nullptr;
}
......
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include <algorithm>
......@@ -383,4 +383,4 @@ void CUDABestSplitFinder::SetUsedFeatureByNode(const std::vector<int8_t>& is_fea
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include <algorithm>
......@@ -1802,4 +1802,4 @@ void CUDABestSplitFinder::LaunchInitCUDARandomKernel() {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -7,7 +7,7 @@
#ifndef LIGHTGBM_TREELEARNER_CUDA_CUDA_BEST_SPLIT_FINDER_HPP_
#define LIGHTGBM_TREELEARNER_CUDA_CUDA_BEST_SPLIT_FINDER_HPP_
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include <LightGBM/bin.h>
#include <LightGBM/dataset.h>
......@@ -211,5 +211,5 @@ class CUDABestSplitFinder {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
#endif // LIGHTGBM_TREELEARNER_CUDA_CUDA_BEST_SPLIT_FINDER_HPP_
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include <algorithm>
#include <memory>
......@@ -370,4 +370,4 @@ void CUDADataPartition::ResetByLeafPred(const std::vector<int>& leaf_pred, int n
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include "cuda_data_partition.hpp"
......@@ -1071,4 +1071,4 @@ void CUDADataPartition::LaunchAddPredictionToScoreKernel(const double* leaf_valu
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
......@@ -6,7 +6,7 @@
#ifndef LIGHTGBM_TREELEARNER_CUDA_CUDA_DATA_PARTITION_HPP_
#define LIGHTGBM_TREELEARNER_CUDA_CUDA_DATA_PARTITION_HPP_
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include <LightGBM/bin.h>
#include <LightGBM/meta.h>
......@@ -384,5 +384,5 @@ class CUDADataPartition {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
#endif // LIGHTGBM_TREELEARNER_CUDA_CUDA_DATA_PARTITION_HPP_
......@@ -4,7 +4,7 @@
* license information.
*/
#ifdef USE_CUDA_EXP
#ifdef USE_CUDA
#include "cuda_histogram_constructor.hpp"
......@@ -193,4 +193,4 @@ void CUDAHistogramConstructor::ResetConfig(const Config* config) {
} // namespace LightGBM
#endif // USE_CUDA_EXP
#endif // USE_CUDA
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment