Commit 20996c92 authored by Jeff Daily
Browse files

partial revert of 61ec4f1a

Instead of replacing all #ifdef USE_CUDA, just add USE_CUDA define to ROCm build.
parent 1b3deb5f
......@@ -410,13 +410,13 @@ class MultiValSparseBin : public MultiValBin {
MultiValSparseBin<INDEX_T, VAL_T>* Clone() override;
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
const void* GetRowWiseData(uint8_t* bit_type,
size_t* total_size,
bool* is_sparse,
const void** out_data_ptr,
uint8_t* data_ptr_bit_type) const override;
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
private:
data_size_t num_data_;
......
......@@ -503,9 +503,9 @@ void TrainingShareStates::CalcBinOffsets(const std::vector<std::unique_ptr<Featu
}
num_hist_total_bin_ = static_cast<int>(feature_hist_offsets_.back());
}
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
column_hist_offsets_ = *offsets;
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
}
void TrainingShareStates::SetMultiValBin(MultiValBin* bin, data_size_t num_data,
......
......@@ -53,9 +53,9 @@ Tree::Tree(int max_leaves, bool track_branch_features, bool is_linear)
leaf_features_.resize(max_leaves_);
leaf_features_inner_.resize(max_leaves_);
}
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
is_cuda_tree_ = false;
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
}
int Tree::Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
......@@ -740,9 +740,9 @@ Tree::Tree(const char* str, size_t* used_len) {
leaf_count_.resize(num_leaves_);
}
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
is_cuda_tree_ = false;
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
if ((num_leaves_ <= 1) && !is_linear_) {
return;
......
......@@ -4,7 +4,7 @@
* license information.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include "cuda_binary_metric.hpp"
......@@ -28,4 +28,4 @@ std::vector<double> CUDABinaryMetricInterface<HOST_METRIC, CUDA_METRIC>::Eval(co
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -7,7 +7,7 @@
#ifndef LIGHTGBM_METRIC_CUDA_CUDA_BINARY_METRIC_HPP_
#define LIGHTGBM_METRIC_CUDA_CUDA_BINARY_METRIC_HPP_
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include <LightGBM/cuda/cuda_metric.hpp>
#include <LightGBM/cuda/cuda_utils.hu>
......@@ -52,6 +52,6 @@ class CUDABinaryLoglossMetric: public CUDABinaryMetricInterface<BinaryLoglossMet
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
#endif // LIGHTGBM_METRIC_CUDA_CUDA_BINARY_METRIC_HPP_
......@@ -4,7 +4,7 @@
* license information.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include "cuda_binary_metric.hpp"
#include "cuda_pointwise_metric.hpp"
......@@ -44,4 +44,4 @@ template void CUDAPointwiseMetricInterface<TweedieMetric, CUDATweedieMetric>::In
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -5,7 +5,7 @@
* Modifications Copyright(C) 2023 Advanced Micro Devices, Inc. All rights reserved.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include <LightGBM/cuda/cuda_algorithms.hpp>
#include <LightGBM/cuda/cuda_rocm_interop.h>
......@@ -77,4 +77,4 @@ template void CUDAPointwiseMetricInterface<TweedieMetric, CUDATweedieMetric>::La
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -7,7 +7,7 @@
#ifndef LIGHTGBM_METRIC_CUDA_CUDA_POINTWISE_METRIC_HPP_
#define LIGHTGBM_METRIC_CUDA_CUDA_POINTWISE_METRIC_HPP_
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include <LightGBM/cuda/cuda_metric.hpp>
#include <LightGBM/cuda/cuda_utils.hu>
......@@ -40,6 +40,6 @@ class CUDAPointwiseMetricInterface: public CUDAMetricInterface<HOST_METRIC> {
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
#endif // LIGHTGBM_METRIC_CUDA_CUDA_POINTWISE_METRIC_HPP_
......@@ -4,7 +4,7 @@
* license information.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include <vector>
......@@ -49,4 +49,4 @@ CUDATweedieMetric::CUDATweedieMetric(const Config& config): CUDARegressionMetric
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -7,7 +7,7 @@
#ifndef LIGHTGBM_METRIC_CUDA_CUDA_REGRESSION_METRIC_HPP_
#define LIGHTGBM_METRIC_CUDA_CUDA_REGRESSION_METRIC_HPP_
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include <LightGBM/cuda/cuda_metric.hpp>
#include <LightGBM/cuda/cuda_utils.hu>
......@@ -210,6 +210,6 @@ class CUDATweedieMetric : public CUDARegressionMetricInterface<TweedieMetric, CU
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
#endif // LIGHTGBM_METRIC_CUDA_CUDA_REGRESSION_METRIC_HPP_
......@@ -17,7 +17,7 @@
namespace LightGBM {
Metric* Metric::CreateMetric(const std::string& type, const Config& config) {
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
if (config.device_type == std::string("cuda") && config.boosting == std::string("gbdt")) {
if (type == std::string("l2")) {
return new CUDAL2Metric(config);
......@@ -78,7 +78,7 @@ Metric* Metric::CreateMetric(const std::string& type, const Config& config) {
return new CUDATweedieMetric(config);
}
} else {
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
if (type == std::string("l2")) {
return new L2Metric(config);
} else if (type == std::string("rmse")) {
......@@ -126,9 +126,9 @@ Metric* Metric::CreateMetric(const std::string& type, const Config& config) {
} else if (type == std::string("tweedie")) {
return new TweedieMetric(config);
}
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
}
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
return nullptr;
}
......
......@@ -4,7 +4,7 @@
* license information.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include "cuda_binary_objective.hpp"
......@@ -61,4 +61,4 @@ void CUDABinaryLogloss::Init(const Metadata& metadata, data_size_t num_data) {
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -5,7 +5,7 @@
* Modifications Copyright(C) 2023 Advanced Micro Devices, Inc. All rights reserved.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include "cuda_binary_objective.hpp"
......@@ -209,4 +209,4 @@ void CUDABinaryLogloss::LaunchResetOVACUDALabelKernel() const {
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -7,7 +7,7 @@
#ifndef LIGHTGBM_OBJECTIVE_CUDA_CUDA_BINARY_OBJECTIVE_HPP_
#define LIGHTGBM_OBJECTIVE_CUDA_CUDA_BINARY_OBJECTIVE_HPP_
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#define GET_GRADIENTS_BLOCK_SIZE_BINARY (1024)
#define CALC_INIT_SCORE_BLOCK_SIZE_BINARY (1024)
......@@ -58,6 +58,6 @@ class CUDABinaryLogloss : public CUDAObjectiveInterface<BinaryLogloss> {
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
#endif // LIGHTGBM_OBJECTIVE_CUDA_CUDA_BINARY_OBJECTIVE_HPP_
......@@ -3,7 +3,7 @@
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include "cuda_multiclass_objective.hpp"
......@@ -59,4 +59,4 @@ const double* CUDAMulticlassOVA::ConvertOutputCUDA(const data_size_t num_data, c
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -3,7 +3,7 @@
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include <algorithm>
......@@ -105,4 +105,4 @@ const double* CUDAMulticlassSoftmax::LaunchConvertOutputCUDAKernel(
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -5,7 +5,7 @@
#ifndef LIGHTGBM_OBJECTIVE_CUDA_CUDA_MULTICLASS_OBJECTIVE_HPP_
#define LIGHTGBM_OBJECTIVE_CUDA_CUDA_MULTICLASS_OBJECTIVE_HPP_
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include <LightGBM/cuda/cuda_objective_function.hpp>
......@@ -74,5 +74,5 @@ class CUDAMulticlassOVA: public CUDAObjectiveInterface<MulticlassOVA> {
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
#endif // LIGHTGBM_OBJECTIVE_CUDA_CUDA_MULTICLASS_OBJECTIVE_HPP_
......@@ -4,7 +4,7 @@
* license information.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include <string>
#include <vector>
......@@ -64,4 +64,4 @@ void CUDARankXENDCG::GenerateItemRands() const {
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -5,7 +5,7 @@
* Modifications Copyright(C) 2023 Advanced Micro Devices, Inc. All rights reserved.
*/
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#include "cuda_rank_objective.hpp"
......@@ -662,4 +662,4 @@ void CUDARankXENDCG::LaunchGetGradientsKernel(const double* score, score_t* grad
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
......@@ -7,7 +7,7 @@
#ifndef LIGHTGBM_OBJECTIVE_CUDA_CUDA_RANK_OBJECTIVE_HPP_
#define LIGHTGBM_OBJECTIVE_CUDA_CUDA_RANK_OBJECTIVE_HPP_
#if defined(USE_CUDA) || defined(USE_ROCM)
#ifdef USE_CUDA
#define NUM_QUERY_PER_BLOCK (10)
......@@ -118,5 +118,5 @@ class CUDARankXENDCG : public CUDALambdaRankObjectiveInterface<RankXENDCG> {
} // namespace LightGBM
#endif // USE_CUDA || USE_ROCM
#endif // USE_CUDA
#endif // LIGHTGBM_OBJECTIVE_CUDA_CUDA_RANK_OBJECTIVE_HPP_
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment