Unverified Commit 6368375b authored by Nikita Titov's avatar Nikita Titov Committed by GitHub
Browse files

[ci][docs] fix link checking action by switching from linkchecker to lychee...

[ci][docs] fix link checking action by switching from linkchecker to lychee and update some links (#7027)
parent ce3e3121
......@@ -154,7 +154,7 @@ struct Config {
// descl2 = ``cross_entropy_lambda``, alternative parameterization of cross-entropy, aliases: ``xentlambda``
// descl2 = label is anything in interval [0, 1]
// desc = ranking application
// descl2 = ``lambdarank``, `lambdarank <https://proceedings.neurips.cc/paper_files/paper/2006/file/af44c4c56f385c43f2529f9b1b018f6a-Paper.pdf>`__ objective. `label_gain <#label_gain>`__ can be used to set the gain (weight) of ``int`` label and all values in ``label`` must be smaller than number of elements in ``label_gain``
// descl2 = ``lambdarank``, `lambdarank <https://proceedings.neurips.cc/paper/2006/hash/af44c4c56f385c43f2529f9b1b018f6a-Abstract.html>`__ objective. `label_gain <#label_gain>`__ can be used to set the gain (weight) of ``int`` label and all values in ``label`` must be smaller than number of elements in ``label_gain``
// descl2 = ``rank_xendcg``, `XE_NDCG_MART <https://arxiv.org/abs/1911.09798>`__ ranking objective function, aliases: ``xendcg``, ``xe_ndcg``, ``xe_ndcg_mart``, ``xendcg_mart``
// descl2 = ``rank_xendcg`` is faster than and achieves the similar performance as ``lambdarank``
// descl2 = label should be ``int`` type, and larger number represents the higher relevance (e.g. 0:bad, 1:fair, 2:good, 3:perfect)
......@@ -423,7 +423,7 @@ struct Config {
double lambda_l2 = 0.0;
// check = >=0.0
// desc = linear tree regularization, corresponds to the parameter ``lambda`` in Eq. 3 of `Gradient Boosting with Piece-Wise Linear Regression Trees <https://arxiv.org/pdf/1802.05640.pdf>`__
// desc = linear tree regularization, corresponds to the parameter ``lambda`` in Eq. 3 of `Gradient Boosting with Piece-Wise Linear Regression Trees <https://arxiv.org/abs/1802.05640>`__
double linear_lambda = 0.0;
// alias = min_split_gain
......@@ -706,7 +706,7 @@ struct Config {
bool is_enable_sparse = true;
// alias = is_enable_bundle, bundle
// desc = set this to ``false`` to disable Exclusive Feature Bundling (EFB), which is described in `LightGBM: A Highly Efficient Gradient Boosting Decision Tree <https://papers.nips.cc/paper_files/paper/2017/hash/6449f44a102fde848669bdd9eb6b76fa-Abstract.html>`__
// desc = set this to ``false`` to disable Exclusive Feature Bundling (EFB), which is described in `LightGBM: A Highly Efficient Gradient Boosting Decision Tree <https://proceedings.neurips.cc/paper/2017/hash/6449f44a102fde848669bdd9eb6b76fa-Abstract.html>`__
// desc = **Note**: disabling this may cause the slow training speed for sparse datasets
bool enable_bundle = true;
......@@ -977,7 +977,7 @@ struct Config {
// check = >0
// desc = used only in ``lambdarank`` application
// desc = controls the number of top-results to focus on during training, refer to "truncation level" in the Sec. 3 of `LambdaMART paper <https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/MSR-TR-2010-82.pdf>`__
// desc = controls the number of top-results to focus on during training, refer to "truncation level" in the Sec. 3 of `LambdaMART paper <https://www.microsoft.com/en-us/research/publication/from-ranknet-to-lambdarank-to-lambdamart-an-overview/>`__
// desc = this parameter is closely related to the desirable cutoff ``k`` in the metric **NDCG@k** that we aim at optimizing the ranker for. The optimal setting for this parameter is likely to be slightly higher than ``k`` (e.g., ``k + 3``) to include more pairs of documents to train on, but perhaps not too high to avoid deviating too much from the desired target metric **NDCG@k**
int lambdarank_truncation_level = 30;
......@@ -1030,7 +1030,7 @@ struct Config {
// descl2 = ``average_precision``, `average precision score <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html>`__
// descl2 = ``binary_logloss``, `log loss <https://en.wikipedia.org/wiki/Cross_entropy>`__, aliases: ``binary``
// descl2 = ``binary_error``, for one sample: ``0`` for correct classification, ``1`` for error classification
// descl2 = ``auc_mu``, `AUC-mu <http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf>`__
// descl2 = ``auc_mu``, `AUC-mu <https://proceedings.mlr.press/v97/kleiman19a.html>`__
// descl2 = ``multi_logloss``, log loss for multi-class classification, aliases: ``multiclass``, ``softmax``, ``multiclassova``, ``multiclass_ova``, ``ova``, ``ovr``
// descl2 = ``multi_error``, error rate for multi-class classification
// descl2 = ``cross_entropy``, cross-entropy (with optional linear weights), aliases: ``xentropy``
......
......@@ -30,7 +30,7 @@ If you would like your AMD or Intel CPU to act like a GPU (for testing and debug
you can install `AMD APP SDK <https://github.com/microsoft/LightGBM/releases/download/v2.0.12/AMD-APP-SDKInstaller-v3.0.130.135-GA-windows-F-x64.exe>`_ on **Windows** and `PoCL <https://portablecl.org>`_ on **Linux**.
Many modern Linux distributions provide packages for PoCL, look for ``pocl-opencl-icd`` on Debian-based distributions and ``pocl`` on RedHat-based distributions.
For **Windows** users, `VC runtime <https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads>`_ is needed if **Visual Studio** is not installed.
For **Windows** users, `VC runtime <https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist>`_ is needed if **Visual Studio** is not installed.
For **macOS** users, the **OpenMP** library is needed.
You can install it by the following command: ``brew install libomp``.
......
......@@ -237,7 +237,7 @@ class AucMuMetric : public Metric {
std::vector<double> Eval(const double* score, const ObjectiveFunction*) const override {
// the notation follows that used in the paper introducing the auc-mu metric:
// http://proceedings.mlr.press/v97/kleiman19a/kleiman19a.pdf
// https://proceedings.mlr.press/v97/kleiman19a.html
auto S = std::vector<std::vector<double>>(num_class_, std::vector<double>(num_class_, 0));
int i_start = 0;
......
......@@ -188,7 +188,7 @@ void LinearTreeLearner<TREE_LEARNER_TYPE>::CalculateLinear(Tree* tree, bool is_r
return;
}
// calculate coefficients using the method described in Eq 3 of https://arxiv.org/pdf/1802.05640.pdf
// calculate coefficients using the method described in Eq 3 of https://arxiv.org/abs/1802.05640
// the coefficients vector is given by
// - (X_T * H * X + lambda) ^ (-1) * (X_T * g)
// where:
......
Markdown is supported
Attach a file by drag & drop or click to upload. (0%)
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment