Unverified Commit 7880b79f authored by James Lamb's avatar James Lamb Committed by GitHub
Browse files

[docs] Change some 'parallel learning' references to 'distributed learning' (#4000)

* [docs] Change some 'parallel learning' references to 'distributed learning'

* found a few more

* one more reference
parent 0ee4d37f
......@@ -374,7 +374,7 @@ void Config::CheckParamConflict() {
}
if (is_parallel && (monotone_constraints_method == std::string("intermediate") || monotone_constraints_method == std::string("advanced"))) {
// In distributed mode, local node doesn't have histograms on all features, cannot perform "intermediate" monotone constraints.
Log::Warning("Cannot use \"intermediate\" or \"advanced\" monotone constraints in parallel learning, auto set to \"basic\" method.");
Log::Warning("Cannot use \"intermediate\" or \"advanced\" monotone constraints in distributed learning, auto set to \"basic\" method.");
monotone_constraints_method = "basic";
}
if (feature_fraction_bynode != 1.0 && (monotone_constraints_method == std::string("intermediate") || monotone_constraints_method == std::string("advanced"))) {
......
......@@ -180,10 +180,10 @@ void CheckSampleSize(size_t sample_cnt, size_t num_data) {
}
Dataset* DatasetLoader::LoadFromFile(const char* filename, int rank, int num_machines) {
// don't support query id in data file when training in parallel
// don't support query id in data file when using distributed training
if (num_machines > 1 && !config_.pre_partition) {
if (group_idx_ > 0) {
Log::Fatal("Using a query id without pre-partitioning the data file is not supported for parallel training.\n"
Log::Fatal("Using a query id without pre-partitioning the data file is not supported for distributed training.\n"
"Please use an additional query file or pre-partition the data");
}
}
......
......@@ -22,7 +22,7 @@ Metadata::Metadata() {
void Metadata::Init(const char* data_filename) {
data_filename_ = data_filename;
// for lambdarank, it needs query data for partition data in parallel learning
// for lambdarank, it needs query data for partition data in distributed learning
LoadQueryBoundaries();
LoadWeights();
LoadQueryWeights();
......@@ -187,7 +187,7 @@ void Metadata::CheckOrPartition(data_size_t num_all_data, const std::vector<data
}
} else {
if (!queries_.empty()) {
Log::Fatal("Cannot used query_id for parallel training");
Log::Fatal("Cannot use query_id for distributed training");
}
data_size_t num_used_data = static_cast<data_size_t>(used_data_indices.size());
// check weights
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment