Unverified Commit 4f47547c authored by James Lamb, committed by GitHub

[CUDA] consolidate CUDA versions (#5677)



* [ci] speed up if-else, swig, and lint conda setup

* add 'source activate'

* python constraint

* start removing cuda v1

* comment out CI

* remove more references

* revert some unnecessary changes

* revert a few more mistakes

* revert another change that ignored params

* sigh

* remove CUDATreeLearner

* fix tests, docs

* fix quoting in setup.py

* restore all CI

* Apply suggestions from code review
Co-authored-by: shiyu1994 <shiyu_k1994@qq.com>

* Apply suggestions from code review

* completely remove cuda_exp, update docs

---------
Co-authored-by: shiyu1994 <shiyu_k1994@qq.com>
parent 5ffd7571
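For context on what "consolidate CUDA versions" means in practice: before this PR, LightGBM carried two CUDA implementations — the original one behind the USE_CUDA macro (the now-removed CUDATreeLearner, selected with device_type=cuda) and the newer one behind USE_CUDA_EXP (selected with device_type=cuda_exp). The diff below collapses that pair into the single USE_CUDA macro and the single "cuda" device string. A minimal, self-contained sketch of the resulting dispatch — the function and messages are illustrative stand-ins, not LightGBM's actual API:

// Illustrative sketch only -- not LightGBM's real dispatch code.
// Before this PR there were two guards (USE_CUDA for the old
// CUDATreeLearner, USE_CUDA_EXP for the newer learner) and two device
// strings ("cuda", "cuda_exp"). After it, one guard and one string remain.
#include <iostream>
#include <string>

void CreateLearner(const std::string& device_type) {
#ifdef USE_CUDA
  if (device_type == "cuda") {      // formerly also reachable as "cuda_exp"
    std::cout << "CUDA learner\n";
    return;
  }
#endif  // USE_CUDA
  if (device_type == "cuda_exp") {  // no longer a valid device_type
    std::cout << "'cuda_exp' was folded into 'cuda'\n";
    return;
  }
  std::cout << "CPU learner\n";
}

int main() {
  CreateLearner("cuda");
  CreateLearner("cuda_exp");
  return 0;
}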
@@ -3,7 +3,7 @@
  * Licensed under the MIT License. See LICENSE file in the project root for license information.
  */
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include <LightGBM/cuda/cuda_metadata.hpp>
@@ -89,4 +89,4 @@ void CUDAMetadata::SetInitScore(const double* init_score, data_size_t len) {
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
@@ -3,7 +3,7 @@
  * Licensed under the MIT License. See LICENSE file in the project root for license information.
  */
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include <LightGBM/cuda/cuda_row_data.hpp>
@@ -474,4 +474,4 @@ template const uint64_t* CUDARowData::GetPartitionPtr<uint64_t>() const;
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
@@ -3,7 +3,7 @@
  * Licensed under the MIT License. See LICENSE file in the project root for license information.
  */
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include <LightGBM/cuda/cuda_tree.hpp>
@@ -337,4 +337,4 @@ void CUDATree::AsConstantTree(double val) {
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
@@ -4,7 +4,7 @@
  */
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include <LightGBM/cuda/cuda_tree.hpp>
@@ -456,4 +456,4 @@ void CUDATree::LaunchAddPredictionToScoreKernel(
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
@@ -345,9 +345,9 @@ void Dataset::Construct(std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
 auto features_in_group = OneFeaturePerGroup(used_features);
 auto is_sparse = io_config.is_enable_sparse;
-if (io_config.device_type == std::string("cuda") || io_config.device_type == std::string("cuda_exp")) {
+if (io_config.device_type == std::string("cuda")) {
 LGBM_config_::current_device = lgbm_device_cuda;
-if ((io_config.device_type == std::string("cuda") || io_config.device_type == std::string("cuda_exp")) && is_sparse) {
+if ((io_config.device_type == std::string("cuda")) && is_sparse) {
 Log::Warning("Using sparse features with CUDA is currently not supported.");
 is_sparse = false;
 }
@@ -355,8 +355,7 @@ void Dataset::Construct(std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
 std::vector<int8_t> group_is_multi_val(used_features.size(), 0);
 if (io_config.enable_bundle && !used_features.empty()) {
-bool lgbm_is_gpu_used = io_config.device_type == std::string("gpu") || io_config.device_type == std::string("cuda")
-|| io_config.device_type == std::string("cuda_exp");
+bool lgbm_is_gpu_used = io_config.device_type == std::string("gpu") || io_config.device_type == std::string("cuda");
 features_in_group = FastFeatureBundling(
 *bin_mappers, sample_non_zero_indices, sample_values, num_per_col,
 num_sample_col, static_cast<data_size_t>(total_sample_cnt),
@@ -447,14 +446,14 @@ void Dataset::FinishLoad() {
 }
 metadata_.FinishLoad();
-#ifdef USE_CUDA_EXP
-if (device_type_ == std::string("cuda_exp")) {
+#ifdef USE_CUDA
+if (device_type_ == std::string("cuda")) {
 CreateCUDAColumnData();
 metadata_.CreateCUDAMetadata(gpu_device_id_);
 } else {
 cuda_column_data_.reset(nullptr);
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 is_finish_load_ = true;
 }
@@ -862,15 +861,15 @@ void Dataset::CopySubrow(const Dataset* fullset,
 device_type_ = fullset->device_type_;
 gpu_device_id_ = fullset->gpu_device_id_;
-#ifdef USE_CUDA_EXP
-if (device_type_ == std::string("cuda_exp")) {
+#ifdef USE_CUDA
+if (device_type_ == std::string("cuda")) {
 if (cuda_column_data_ == nullptr) {
 cuda_column_data_.reset(new CUDAColumnData(fullset->num_data(), gpu_device_id_));
 metadata_.CreateCUDAMetadata(gpu_device_id_);
 }
 cuda_column_data_->CopySubrow(fullset->cuda_column_data(), used_indices, num_used_indices);
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 bool Dataset::SetFloatField(const char* field_name, const float* field_data,
@@ -1508,13 +1507,13 @@ void Dataset::AddFeaturesFrom(Dataset* other) {
 raw_data_.push_back(other->raw_data_[i]);
 }
 }
-#ifdef USE_CUDA_EXP
-if (device_type_ == std::string("cuda_exp")) {
+#ifdef USE_CUDA
+if (device_type_ == std::string("cuda")) {
 CreateCUDAColumnData();
 } else {
 cuda_column_data_ = nullptr;
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 const void* Dataset::GetColWiseData(
@@ -1536,7 +1535,7 @@ const void* Dataset::GetColWiseData(
 return feature_groups_[feature_group_index]->GetColWiseData(sub_feature_index, bit_type, is_sparse, bin_iterator);
 }
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 void Dataset::CreateCUDAColumnData() {
 cuda_column_data_.reset(new CUDAColumnData(num_data_, gpu_device_id_));
 int num_columns = 0;
@@ -1671,6 +1670,6 @@ void Dataset::CreateCUDAColumnData() {
 feature_to_column);
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 } // namespace LightGBM
@@ -279,14 +279,14 @@ Dataset* DatasetLoader::LoadFromFile(const char* filename, int rank, int num_mac
 dataset->device_type_ = config_.device_type;
 dataset->gpu_device_id_ = config_.gpu_device_id;
-#ifdef USE_CUDA_EXP
-if (config_.device_type == std::string("cuda_exp")) {
+#ifdef USE_CUDA
+if (config_.device_type == std::string("cuda")) {
 dataset->CreateCUDAColumnData();
 dataset->metadata_.CreateCUDAMetadata(dataset->gpu_device_id_);
 } else {
 dataset->cuda_column_data_ = nullptr;
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 // check meta data
 dataset->metadata_.CheckOrPartition(num_global_data, used_data_indices);
...
@@ -467,7 +467,7 @@ class DenseBin : public Bin {
 private:
 data_size_t num_data_;
-#if defined(USE_CUDA) || defined(USE_CUDA_EXP)
+#ifdef USE_CUDA
 std::vector<VAL_T, CHAllocator<VAL_T>> data_;
 #else
 std::vector<VAL_T, Common::AlignmentAllocator<VAL_T, kAlignedSize>> data_;
...
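The DenseBin hunk above shows the same consolidation applied to a compile-time allocator choice: the bin storage previously used the CUDA host allocator when either macro was defined, and now checks only USE_CUDA. Below is a self-contained sketch of that pattern, with a hypothetical PinnedAllocator standing in for LightGBM's CHAllocator (which, in the real implementation, would allocate pinned host memory via the CUDA runtime):

// Compile-time allocator selection sketch; PinnedAllocator is a
// hypothetical stand-in for LightGBM's CHAllocator.
#include <cstdint>
#include <cstdlib>
#include <vector>

template <typename T>
struct PinnedAllocator {
  using value_type = T;
  PinnedAllocator() = default;
  template <typename U> PinnedAllocator(const PinnedAllocator<U>&) {}
  // Real code would call cudaHostAlloc here; malloc keeps the sketch portable.
  T* allocate(std::size_t n) { return static_cast<T*>(std::malloc(n * sizeof(T))); }
  void deallocate(T* p, std::size_t) { std::free(p); }
};
template <typename T, typename U>
bool operator==(const PinnedAllocator<T>&, const PinnedAllocator<U>&) { return true; }
template <typename T, typename U>
bool operator!=(const PinnedAllocator<T>&, const PinnedAllocator<U>&) { return false; }

#ifdef USE_CUDA
using BinData = std::vector<uint8_t, PinnedAllocator<uint8_t>>;  // host-pinned path
#else
using BinData = std::vector<uint8_t>;  // aligned allocator in the real code
#endif

int main() {
  BinData data(1024, 0);  // bin values, allocated via the selected allocator
  return data.size() == 1024 ? 0 : 1;
}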
@@ -18,9 +18,9 @@ Metadata::Metadata() {
 weight_load_from_file_ = false;
 query_load_from_file_ = false;
 init_score_load_from_file_ = false;
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 cuda_metadata_ = nullptr;
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 void Metadata::Init(const char* data_filename) {
@@ -344,11 +344,11 @@ void Metadata::SetInitScore(const double* init_score, data_size_t len) {
 init_score_[i] = Common::AvoidInf(init_score[i]);
 }
 init_score_load_from_file_ = false;
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 if (cuda_metadata_ != nullptr) {
 cuda_metadata_->SetInitScore(init_score_.data(), len);
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 void Metadata::InsertInitScores(const double* init_scores, data_size_t start_index, data_size_t len, data_size_t source_size) {
@@ -387,11 +387,11 @@ void Metadata::SetLabel(const label_t* label, data_size_t len) {
 for (data_size_t i = 0; i < num_data_; ++i) {
 label_[i] = Common::AvoidInf(label[i]);
 }
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 if (cuda_metadata_ != nullptr) {
 cuda_metadata_->SetLabel(label_.data(), len);
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 void Metadata::InsertLabels(const label_t* labels, data_size_t start_index, data_size_t len) {
@@ -428,11 +428,11 @@ void Metadata::SetWeights(const label_t* weights, data_size_t len) {
 }
 CalculateQueryWeights();
 weight_load_from_file_ = false;
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 if (cuda_metadata_ != nullptr) {
 cuda_metadata_->SetWeights(weights_.data(), len);
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 void Metadata::InsertWeights(const label_t* weights, data_size_t start_index, data_size_t len) {
@@ -477,7 +477,7 @@ void Metadata::SetQuery(const data_size_t* query, data_size_t len) {
 }
 CalculateQueryWeights();
 query_load_from_file_ = false;
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 if (cuda_metadata_ != nullptr) {
 if (query_weights_.size() > 0) {
 CHECK_EQ(query_weights_.size(), static_cast<size_t>(num_queries_));
@@ -486,7 +486,7 @@ void Metadata::SetQuery(const data_size_t* query, data_size_t len) {
 cuda_metadata_->SetQuery(query_boundaries_.data(), nullptr, num_queries_);
 }
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 void Metadata::InsertQueries(const data_size_t* queries, data_size_t start_index, data_size_t len) {
@@ -635,12 +635,12 @@ void Metadata::FinishLoad() {
 CalculateQueryBoundaries();
 }
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 void Metadata::CreateCUDAMetadata(const int gpu_device_id) {
 cuda_metadata_.reset(new CUDAMetadata(gpu_device_id));
 cuda_metadata_->Init(label_, weights_, query_boundaries_, query_weights_, init_score_);
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 void Metadata::LoadFromMemory(const void* memory) {
 const char* mem_ptr = reinterpret_cast<const char*>(memory);
...
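The Metadata hunks above all follow one pattern: each setter updates the host-side vector unconditionally and, when the CUDA mirror object has been created, pushes the same values to the device. A minimal sketch of that mirror-on-set pattern — DeviceMetadata is a hypothetical stand-in for LightGBM's CUDAMetadata, whose real SetLabel would copy to device buffers:

// Mirror-on-set sketch; DeviceMetadata stands in for CUDAMetadata.
#include <cstdio>
#include <memory>
#include <vector>

struct DeviceMetadata {
  void SetLabel(const float* /*data*/, int len) {
    std::printf("mirrored %d labels to device\n", len);  // cudaMemcpy in real code
  }
};

class Metadata {
 public:
  void SetLabel(const float* label, int len) {
    label_.assign(label, label + len);  // host copy is always updated
#ifdef USE_CUDA
    if (cuda_metadata_ != nullptr) {    // mirror only once the device copy exists
      cuda_metadata_->SetLabel(label_.data(), len);
    }
#endif  // USE_CUDA
  }
 private:
  std::vector<float> label_;
#ifdef USE_CUDA
  std::unique_ptr<DeviceMetadata> cuda_metadata_;
#endif  // USE_CUDA
};

int main() {
  Metadata m;
  const float labels[] = {0.f, 1.f, 1.f};
  m.SetLabel(labels, 3);
  return 0;
}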
@@ -211,13 +211,13 @@ class MultiValDenseBin : public MultiValBin {
 MultiValDenseBin<VAL_T>* Clone() override;
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 const void* GetRowWiseData(uint8_t* bit_type,
 size_t* total_size,
 bool* is_sparse,
 const void** out_data_ptr,
 uint8_t* data_ptr_bit_type) const override;
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 private:
 data_size_t num_data_;
...
@@ -292,13 +292,13 @@ class MultiValSparseBin : public MultiValBin {
 MultiValSparseBin<INDEX_T, VAL_T>* Clone() override;
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 const void* GetRowWiseData(uint8_t* bit_type,
 size_t* total_size,
 bool* is_sparse,
 const void** out_data_ptr,
 uint8_t* data_ptr_bit_type) const override;
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 private:
 data_size_t num_data_;
...
@@ -382,9 +382,9 @@ void TrainingShareStates::CalcBinOffsets(const std::vector<std::unique_ptr<Featu
 }
 num_hist_total_bin_ = static_cast<int>(feature_hist_offsets_.back());
 }
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 column_hist_offsets_ = *offsets;
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 void TrainingShareStates::SetMultiValBin(MultiValBin* bin, data_size_t num_data,
...
@@ -53,9 +53,9 @@ Tree::Tree(int max_leaves, bool track_branch_features, bool is_linear)
 leaf_features_.resize(max_leaves_);
 leaf_features_inner_.resize(max_leaves_);
 }
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 is_cuda_tree_ = false;
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 }
 int Tree::Split(int leaf, int feature, int real_feature, uint32_t threshold_bin,
@@ -731,9 +731,9 @@ Tree::Tree(const char* str, size_t* used_len) {
 is_linear_ = false;
 }
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 is_cuda_tree_ = false;
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 if ((num_leaves_ <= 1) && !is_linear_) {
 return;
...
@@ -4,7 +4,7 @@
  * license information.
  */
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include "cuda_binary_metric.hpp"
@@ -28,4 +28,4 @@ std::vector<double> CUDABinaryMetricInterface<HOST_METRIC, CUDA_METRIC>::Eval(co
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
@@ -7,7 +7,7 @@
 #ifndef LIGHTGBM_METRIC_CUDA_CUDA_BINARY_METRIC_HPP_
 #define LIGHTGBM_METRIC_CUDA_CUDA_BINARY_METRIC_HPP_
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include <LightGBM/cuda/cuda_metric.hpp>
 #include <LightGBM/cuda/cuda_utils.h>
@@ -52,6 +52,6 @@ class CUDABinaryLoglossMetric: public CUDABinaryMetricInterface<BinaryLoglossMet
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 #endif // LIGHTGBM_METRIC_CUDA_CUDA_BINARY_METRIC_HPP_
@@ -4,7 +4,7 @@
  * license information.
  */
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include "cuda_binary_metric.hpp"
 #include "cuda_pointwise_metric.hpp"
@@ -35,4 +35,4 @@ template void CUDAPointwiseMetricInterface<BinaryLoglossMetric, CUDABinaryLoglos
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
@@ -4,7 +4,7 @@
  * license information.
  */
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include <LightGBM/cuda/cuda_algorithms.hpp>
@@ -66,4 +66,4 @@ template void CUDAPointwiseMetricInterface<BinaryLoglossMetric, CUDABinaryLoglos
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
@@ -7,7 +7,7 @@
 #ifndef LIGHTGBM_METRIC_CUDA_CUDA_POINTWISE_METRIC_HPP_
 #define LIGHTGBM_METRIC_CUDA_CUDA_POINTWISE_METRIC_HPP_
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include <LightGBM/cuda/cuda_metric.hpp>
 #include <LightGBM/cuda/cuda_utils.h>
@@ -38,6 +38,6 @@ class CUDAPointwiseMetricInterface: public CUDAMetricInterface<HOST_METRIC> {
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 #endif // LIGHTGBM_METRIC_CUDA_CUDA_POINTWISE_METRIC_HPP_
@@ -4,7 +4,7 @@
  * license information.
  */
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include <vector>
@@ -31,4 +31,4 @@ CUDAL2Metric::CUDAL2Metric(const Config& config): CUDARegressionMetricInterface<
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
@@ -7,7 +7,7 @@
 #ifndef LIGHTGBM_METRIC_CUDA_CUDA_REGRESSION_METRIC_HPP_
 #define LIGHTGBM_METRIC_CUDA_CUDA_REGRESSION_METRIC_HPP_
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 #include <LightGBM/cuda/cuda_metric.hpp>
 #include <LightGBM/cuda/cuda_utils.h>
@@ -54,6 +54,6 @@ class CUDAL2Metric : public CUDARegressionMetricInterface<L2Metric, CUDAL2Metric
 } // namespace LightGBM
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 #endif // LIGHTGBM_METRIC_CUDA_CUDA_REGRESSION_METRIC_HPP_
@@ -17,77 +17,77 @@
 namespace LightGBM {
 Metric* Metric::CreateMetric(const std::string& type, const Config& config) {
-#ifdef USE_CUDA_EXP
-if (config.device_type == std::string("cuda_exp") && config.boosting == std::string("gbdt")) {
+#ifdef USE_CUDA
+if (config.device_type == std::string("cuda") && config.boosting == std::string("gbdt")) {
 if (type == std::string("l2")) {
 return new CUDAL2Metric(config);
 } else if (type == std::string("rmse")) {
 return new CUDARMSEMetric(config);
 } else if (type == std::string("l1")) {
-Log::Warning("Metric l1 is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric l1 is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new L1Metric(config);
 } else if (type == std::string("quantile")) {
-Log::Warning("Metric quantile is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric quantile is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new QuantileMetric(config);
 } else if (type == std::string("huber")) {
-Log::Warning("Metric huber is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric huber is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new HuberLossMetric(config);
 } else if (type == std::string("fair")) {
-Log::Warning("Metric fair is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric fair is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new FairLossMetric(config);
 } else if (type == std::string("poisson")) {
-Log::Warning("Metric poisson is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric poisson is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new PoissonMetric(config);
 } else if (type == std::string("binary_logloss")) {
 return new CUDABinaryLoglossMetric(config);
 } else if (type == std::string("binary_error")) {
-Log::Warning("Metric binary_error is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric binary_error is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new BinaryErrorMetric(config);
 } else if (type == std::string("auc")) {
-Log::Warning("Metric auc is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric auc is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new AUCMetric(config);
 } else if (type == std::string("average_precision")) {
-Log::Warning("Metric average_precision is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric average_precision is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new AveragePrecisionMetric(config);
 } else if (type == std::string("auc_mu")) {
-Log::Warning("Metric auc_mu is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric auc_mu is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new AucMuMetric(config);
 } else if (type == std::string("ndcg")) {
-Log::Warning("Metric ndcg is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric ndcg is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new NDCGMetric(config);
 } else if (type == std::string("map")) {
-Log::Warning("Metric map is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric map is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new MapMetric(config);
 } else if (type == std::string("multi_logloss")) {
-Log::Warning("Metric multi_logloss is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric multi_logloss is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new MultiSoftmaxLoglossMetric(config);
 } else if (type == std::string("multi_error")) {
-Log::Warning("Metric multi_error is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric multi_error is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new MultiErrorMetric(config);
 } else if (type == std::string("cross_entropy")) {
-Log::Warning("Metric cross_entropy is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric cross_entropy is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new CrossEntropyMetric(config);
 } else if (type == std::string("cross_entropy_lambda")) {
-Log::Warning("Metric cross_entropy_lambda is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric cross_entropy_lambda is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new CrossEntropyLambdaMetric(config);
 } else if (type == std::string("kullback_leibler")) {
-Log::Warning("Metric kullback_leibler is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric kullback_leibler is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new KullbackLeiblerDivergence(config);
 } else if (type == std::string("mape")) {
-Log::Warning("Metric mape is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric mape is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new MAPEMetric(config);
 } else if (type == std::string("gamma")) {
-Log::Warning("Metric gamma is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric gamma is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new GammaMetric(config);
 } else if (type == std::string("gamma_deviance")) {
-Log::Warning("Metric gamma_deviance is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric gamma_deviance is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new GammaDevianceMetric(config);
 } else if (type == std::string("tweedie")) {
-Log::Warning("Metric tweedie is not implemented in cuda_exp version. Fall back to evaluation on CPU.");
+Log::Warning("Metric tweedie is not implemented in cuda version. Fall back to evaluation on CPU.");
 return new TweedieMetric(config);
 }
 } else {
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 if (type == std::string("l2")) {
 return new L2Metric(config);
 } else if (type == std::string("rmse")) {
@@ -135,9 +135,9 @@ Metric* Metric::CreateMetric(const std::string& type, const Config& config) {
 } else if (type == std::string("tweedie")) {
 return new TweedieMetric(config);
 }
-#ifdef USE_CUDA_EXP
+#ifdef USE_CUDA
 }
-#endif // USE_CUDA_EXP
+#endif // USE_CUDA
 return nullptr;
 }
...
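The metric factory above implements a warn-and-fall-back policy: on a CUDA device, metrics with CUDA kernels (l2, rmse, binary_logloss) get their CUDA classes, and everything else logs a warning and returns the CPU implementation. A condensed, self-contained sketch of that dispatch — the classes here are trimmed stand-ins for LightGBM's metric types, not the real ones:

// Warn-and-fall-back dispatch sketch; Metric subclasses are stand-ins.
#include <iostream>
#include <memory>
#include <string>

struct Metric { virtual ~Metric() = default; };
struct L2Metric : Metric {};
struct L1Metric : Metric {};
struct CUDAL2Metric : Metric {};

std::unique_ptr<Metric> CreateMetric(const std::string& type, bool is_cuda_device) {
  if (is_cuda_device) {
    if (type == "l2") return std::make_unique<CUDAL2Metric>();  // CUDA kernel exists
    if (type == "l1") {                                         // no CUDA kernel: warn + CPU
      std::cerr << "Metric l1 is not implemented in cuda version. "
                   "Fall back to evaluation on CPU.\n";
      return std::make_unique<L1Metric>();
    }
    return nullptr;
  }
  if (type == "l2") return std::make_unique<L2Metric>();
  if (type == "l1") return std::make_unique<L1Metric>();
  return nullptr;
}

int main() {
  auto m = CreateMetric("l1", /*is_cuda_device=*/true);  // warns, returns CPU metric
  return m ? 0 : 1;
}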