"diffusers/examples/dreambooth/README_hidream.md" did not exist on "c27b03391565eca1ca43510dc0f8e2f7cd4e8f8f"
Commit d7f8aa53 authored by James Lamb's avatar James Lamb Committed by Nikita Titov
Browse files

[docs] fixed miscellaneous typos in comments and documentation (#2496)

* fixed miscellaneous typos in documentation

* fix typo introduced in typo-fixing PR
parent ff4e48ce
......@@ -269,7 +269,7 @@ cb.early.stop <- function(stopping_rounds, verbose = TRUE) {
best_msg <- NULL
eval_len <- NULL
# Initalization function
# Initialization function
init <- function(env) {
# Store evaluation length
......
......@@ -855,7 +855,7 @@ dimnames.lgb.Dataset <- function(x) {
#' original \code{lgb.Dataset} object
#'
#' @param dataset Object of class \code{lgb.Dataset}
#' @param idxset a integer vector of indices of rows needed
#' @param idxset an integer vector of indices of rows needed
#' @param ... other parameters (currently not used)
#' @return constructed sub dataset
#'
......
......@@ -4,7 +4,7 @@
#'
#' @param model object of class \code{lgb.Booster}.
#' @param data a matrix object or a dgCMatrix object.
#' @param idxset a integer vector of indices of rows needed.
#' @param idxset an integer vector of indices of rows needed.
#' @param num_iteration number of iteration want to predict with, NULL or <= 0 means use best iteration.
#'
#' @return
......
......@@ -20,7 +20,7 @@ NULL
#' @title Train a LightGBM model
#' @name lightgbm
#' @description Simple interface for training an LightGBM model.
#' @description Simple interface for training a LightGBM model.
#' @inheritParams lgb_shared_params
#' @param label Vector of labels, used if \code{data} is not an \code{\link{lgb.Dataset}}
#' @param weight vector of response values. If not NULL, will set to dataset
......
......@@ -9,8 +9,8 @@ dtest <- lgb.Dataset.create.valid(dtrain, data = agaricus.test$data, label = aga
valids <- list(eval = dtest, train = dtrain)
#--------------------Advanced features ---------------------------
# advanced: start from a initial base prediction
print("Start running example to start from a initial prediction")
# advanced: start from an initial base prediction
print("Start running example to start from an initial prediction")
# Train lightgbm for 1 round
param <- list(num_leaves = 4,
......
......@@ -11,7 +11,7 @@ lgb.interprete(model, data, idxset, num_iteration = NULL)
\item{data}{a matrix object or a dgCMatrix object.}
\item{idxset}{a integer vector of indices of rows needed.}
\item{idxset}{an integer vector of indices of rows needed.}
\item{num_iteration}{number of iteration want to predict with, NULL or <= 0 means use best iteration.}
}
......
......@@ -60,5 +60,5 @@ List of callback functions that are applied at each iteration.}
}}
}
\description{
Simple interface for training an LightGBM model.
Simple interface for training a LightGBM model.
}
......@@ -14,7 +14,7 @@ slice(dataset, ...)
\item{...}{other parameters (currently not used)}
\item{idxset}{a integer vector of indices of rows needed}
\item{idxset}{an integer vector of indices of rows needed}
}
\value{
constructed sub dataset
......
......@@ -33,7 +33,7 @@ After installing the drivers you need to restart the server.
After about 30 seconds, the server should be up again.
If you are using a AMD GPU, you should download and install the `AMDGPU-Pro`_ driver and also install package ``ocl-icd-libopencl1`` and ``ocl-icd-opencl-dev``.
If you are using an AMD GPU, you should download and install the `AMDGPU-Pro`_ driver and also install package ``ocl-icd-libopencl1`` and ``ocl-icd-opencl-dev``.
Build LightGBM
--------------
......
......@@ -370,7 +370,7 @@ Learning Control Parameters
- used for the categorical features
- L2 regularization in categorcial split
- L2 regularization in categorical split
- ``cat_smooth`` :raw-html:`<a id="cat_smooth" title="Permalink to this parameter" href="#cat_smooth">&#x1F517;&#xFE0E;</a>`, default = ``10.0``, type = double, constraints: ``cat_smooth >= 0.0``
......
......@@ -33,7 +33,7 @@ class Application {
/*! \brief Destructor */
~Application();
/*! \brief To call this funciton to run application*/
/*! \brief To call this function to run application*/
inline void Run();
private:
......
......@@ -159,13 +159,13 @@ class BinMapper {
static int SizeForSpecificBin(int bin);
/*!
* \brief Seirilizing this object to buffer
* \brief Serializing this object to buffer
* \param buffer The destination
*/
void CopyTo(char* buffer) const;
/*!
* \brief Deserilizing this object from buffer
* \brief Deserializing this object from buffer
* \param buffer The source
*/
void CopyFrom(const char* buffer);
......@@ -205,7 +205,7 @@ class BinMapper {
std::unordered_map<int, unsigned int> categorical_2_bin_;
/*! \brief Mapper from bin to categorical */
std::vector<int> bin_2_categorical_;
/*! \brief minimal feature vaule */
/*! \brief minimal feature value */
double min_val_;
/*! \brief maximum feature value */
double max_val_;
......@@ -217,7 +217,7 @@ class BinMapper {
* \brief Interface for ordered bin data. efficient for construct histogram, especially for sparse bin
* There are 2 advantages by using ordered bin.
* 1. group the data by leafs to improve the cache hit.
* 2. only store the non-zero bin, which can speed up the histogram consturction for sparse features.
* 2. only store the non-zero bin, which can speed up the histogram construction for sparse features.
* However it brings additional cost: it need re-order the bins after every split, which will cost much for dense feature.
* So we only using ordered bin for sparse situations.
*/
......@@ -239,8 +239,8 @@ class OrderedBin {
* Note: Unlike Bin, OrderedBin doesn't use ordered gradients and ordered hessians.
* Because it is hard to know the relative index in one leaf for sparse bin, since we skipped zero bins.
* \param leaf Using which leaf's data to construct
* \param gradients Gradients, Note:non-oredered by leaf
* \param hessians Hessians, Note:non-oredered by leaf
* \param gradients Gradients, Note:non-ordered by leaf
* \param hessians Hessians, Note:non-ordered by leaf
* \param out Output Result
*/
virtual void ConstructHistogram(int leaf, const score_t* gradients,
......@@ -251,7 +251,7 @@ class OrderedBin {
* Note: Unlike Bin, OrderedBin doesn't use ordered gradients and ordered hessians.
* Because it is hard to know the relative index in one leaf for sparse bin, since we skipped zero bins.
* \param leaf Using which leaf's data to construct
* \param gradients Gradients, Note:non-oredered by leaf
* \param gradients Gradients, Note:non-ordered by leaf
* \param out Output Result
*/
virtual void ConstructHistogram(int leaf, const score_t* gradients, HistogramBinEntry* out) const = 0;
......@@ -379,7 +379,7 @@ class Bin {
* \brief Split data according to threshold, if bin <= threshold, will put into left(lte_indices), else put into right(gt_indices)
* \param min_bin min_bin of current used feature
* \param max_bin max_bin of current used feature
* \param default_bin defualt bin if bin not in [min_bin, max_bin]
* \param default_bin default bin if bin not in [min_bin, max_bin]
* \param missing_type missing type
* \param default_left missing bin will go to left child
* \param threshold The split threshold.
......@@ -398,7 +398,7 @@ class Bin {
* \brief Split data according to threshold, if bin <= threshold, will put into left(lte_indices), else put into right(gt_indices)
* \param min_bin min_bin of current used feature
* \param max_bin max_bin of current used feature
* \param default_bin defualt bin if bin not in [min_bin, max_bin]
* \param default_bin default bin if bin not in [min_bin, max_bin]
* \param threshold The split threshold.
* \param num_threshold Number of threshold
* \param data_indices Used data indices. After called this function. The less than or equal data indices will store on this object.
......
......@@ -832,7 +832,7 @@ LIGHTGBM_C_EXPORT int LGBM_BoosterPredictForMat(BoosterHandle handle,
double* out_result);
/*!
* \brief Make prediction for an new dataset. This method re-uses the internal predictor structure
* \brief Make prediction for a new dataset. This method re-uses the internal predictor structure
* from previous calls and is optimized for single row invocation.
* \note
* You should pre-allocate memory for ``out_result``:
......
......@@ -371,7 +371,7 @@ struct Config {
// check = >=0.0
// desc = used for the categorical features
// desc = L2 regularization in categorcial split
// desc = L2 regularization in categorical split
double cat_l2 = 10.0;
// check = >=0.0
......
......@@ -27,25 +27,25 @@ namespace LightGBM {
class DatasetLoader;
/*!
* \brief This class is used to store some meta(non-feature) data for training data,
* e.g. labels, weights, initial scores, qurey level informations.
 * e.g. labels, weights, initial scores, query level information.
*
* Some details:
* 1. Label, used for traning.
* 1. Label, used for training.
* 2. Weights, weighs of records, optional
* 3. Query Boundaries, necessary for lambdarank.
* The documents of i-th query is in [ query_boundarise[i], query_boundarise[i+1] )
* 4. Query Weights, auto calculate by weights and query_boundarise(if both of them are existed)
* the weight for i-th query is sum(query_boundarise[i] , .., query_boundarise[i+1]) / (query_boundarise[i + 1] - query_boundarise[i+1])
* 5. Initial score. optional. if exsitng, the model will boost from this score, otherwise will start from 0.
* The documents of i-th query is in [ query_boundaries[i], query_boundaries[i+1] )
* 4. Query Weights, auto calculate by weights and query_boundaries(if both of them are existed)
* the weight for i-th query is sum(query_boundaries[i] , .., query_boundaries[i+1]) / (query_boundaries[i + 1] - query_boundaries[i+1])
* 5. Initial score. optional. if existing, the model will boost from this score, otherwise will start from 0.
*/
class Metadata {
public:
/*!
* \brief Null costructor
* \brief Null constructor
*/
Metadata();
/*!
* \brief Initialization will load qurey level informations, since it is need for sampling data
 * \brief Initialization will load query level information, since it is needed for sampling data
* \param data_filename Filename of data
* \param init_score_filename Filename of initial score
*/
......@@ -75,7 +75,7 @@ class Metadata {
/*!
* \brief Partition label by used indices
* \param used_indices Indice of local used
* \param used_indices Indices of local used
*/
void PartitionLabel(const std::vector<data_size_t>& used_indices);
......@@ -268,7 +268,7 @@ class Parser {
virtual int NumFeatures() const = 0;
/*!
* \brief Create a object of parser, will auto choose the format depend on file
* \brief Create an object of parser, will auto choose the format depend on file
* \param filename One Filename of data
* \param num_features Pass num_features of this data file if you know, <=0 means don't know
* \param label_idx index of label column
......@@ -278,7 +278,7 @@ class Parser {
};
/*! \brief The main class of data set,
* which are used to traning or validation
 * which are used for training or validation
*/
class Dataset {
public:
......
......@@ -13,9 +13,9 @@
/*!
* \brief get string message of the last error
* all functions in this file will return 0 on success
* and -1 when an error occured
* \return err_msg error inforomation
* \return error inforomation
* and -1 when an error occurred
* \return err_msg error information
* \return error information
*/
LIGHTGBM_C_EXPORT LGBM_SE LGBM_GetLastError_R(LGBM_SE buf_len, LGBM_SE actual_len, LGBM_SE err_msg);
......@@ -117,7 +117,7 @@ LIGHTGBM_C_EXPORT LGBM_SE LGBM_DatasetGetFeatureNames_R(LGBM_SE handle,
LGBM_SE call_state);
/*!
* \brief save dateset to binary file
* \brief save dataset to binary file
* \param handle an instance of dataset
* \param filename file name
* \return 0 when succeed, -1 when failure happens
......@@ -176,7 +176,7 @@ LIGHTGBM_C_EXPORT LGBM_SE LGBM_DatasetGetField_R(LGBM_SE handle,
/*!
* \brief Update parameters for a Dataset
* \param handle a instance of data matrix
* \param handle an instance of data matrix
* \param parameters parameters
* \return 0 when succeed, -1 when failure happens
*/
......@@ -207,10 +207,10 @@ LIGHTGBM_C_EXPORT LGBM_SE LGBM_DatasetGetNumFeature_R(LGBM_SE handle,
// --- start Booster interfaces
/*!
* \brief create an new boosting learner
* \brief create a new boosting learner
* \param train_data training data set
* \param parameters format: 'key1=value1 key2=value2'
* \prama out handle of created Booster
* \param out handle of created Booster
* \return 0 when succeed, -1 when failure happens
*/
LIGHTGBM_C_EXPORT LGBM_SE LGBM_BoosterCreate_R(LGBM_SE train_data,
......@@ -229,7 +229,7 @@ LIGHTGBM_C_EXPORT LGBM_SE LGBM_BoosterFree_R(LGBM_SE handle,
/*!
* \brief load an existing boosting from model file
* \param filename filename of model
* \prama out handle of created Booster
* \param out handle of created Booster
* \return 0 when succeed, -1 when failure happens
*/
LIGHTGBM_C_EXPORT LGBM_SE LGBM_BoosterCreateFromModelfile_R(LGBM_SE filename,
......@@ -351,7 +351,7 @@ LIGHTGBM_C_EXPORT LGBM_SE LGBM_BoosterGetEvalNames_R(LGBM_SE handle,
* \brief get evaluation for training data and validation data
* \param handle handle
* \param data_idx 0:training data, 1: 1st valid data, 2:2nd valid data ...
* \param out_result float arrary contains result
* \param out_result float array contains result
* \return 0 when succeed, -1 when failure happens
*/
LIGHTGBM_C_EXPORT LGBM_SE LGBM_BoosterGetEval_R(LGBM_SE handle,
......@@ -413,7 +413,7 @@ LIGHTGBM_C_EXPORT LGBM_SE LGBM_BoosterPredictForFile_R(LGBM_SE handle,
* \param is_rawscore
* \param is_leafidx
* \param num_iteration number of iteration for prediction, <= 0 means no limit
* \param out_len lenght of prediction
* \param out_len length of prediction
* \return 0 when succeed, -1 when failure happens
*/
LIGHTGBM_C_EXPORT LGBM_SE LGBM_BoosterCalcNumPredict_R(LGBM_SE handle,
......@@ -426,7 +426,7 @@ LIGHTGBM_C_EXPORT LGBM_SE LGBM_BoosterCalcNumPredict_R(LGBM_SE handle,
LGBM_SE call_state);
/*!
* \brief make prediction for an new data set
* \brief make prediction for a new data set
* Note: should pre-allocate memory for out_result,
* for normal and raw score: its length is equal to num_class * num_data
* for leaf index, its length is equal to num_class * num_data * num_iteration
......
......@@ -17,10 +17,10 @@ namespace LightGBM {
/*! \brief Type of data size, it is better to use signed type*/
typedef int32_t data_size_t;
// Enable following marco to use double for score_t
// Enable following macro to use double for score_t
// #define SCORE_T_USE_DOUBLE
// Enable following marco to use double for label_t
// Enable following macro to use double for label_t
// #define LABEL_T_USE_DOUBLE
/*! \brief Type of score, and gradients */
......
......@@ -38,7 +38,7 @@ class Metric {
virtual double factor_to_bigger_better() const = 0;
/*!
* \brief Calcaluting and printing metric result
* \brief Calculating and printing metric result
* \param score Current prediction score
*/
virtual std::vector<double> Eval(const double* score, const ObjectiveFunction* objective) const = 0;
......
......@@ -23,7 +23,7 @@ class BruckMap {
public:
/*! \brief The communication times for one all gather operation */
int k;
/*! \brief in_ranks[i] means the incomming rank on i-th communication */
/*! \brief in_ranks[i] means the incoming rank on i-th communication */
std::vector<int> in_ranks;
/*! \brief out_ranks[i] means the out rank on i-th communication */
std::vector<int> out_ranks;
......@@ -55,7 +55,7 @@ enum RecursiveHalvingNodeType {
/*! \brief Network structure for recursive halving algorithm */
class RecursiveHalvingMap {
public:
/*! \brief Communication times for one recursize halving algorithm */
/*! \brief Communication times for one recursive halving algorithm */
int k;
/*! \brief Node type */
RecursiveHalvingNodeType type;
......
......@@ -31,7 +31,7 @@ class Tree {
explicit Tree(int max_leaves);
/*!
* \brief Construtor, from a string
* \brief Constructor, from a string
* \param str Model string
* \param used_len used count of str
*/
......@@ -103,7 +103,7 @@ class Tree {
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scorese
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
......@@ -143,7 +143,7 @@ class Tree {
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a learning rate) is used to tune the traning process
* shrinkage rate (a.k.a learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
inline void Shrinkage(double rate) {
......@@ -334,7 +334,7 @@ class Tree {
double one_fraction;
// note that pweight is included for convenience and is not tied with the other attributes,
// the pweight of the i'th path element is the permuation weight of paths with i-1 ones in them
// the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them
double pweight;
PathElement() {}
......@@ -354,12 +354,12 @@ class Tree {
/*! \brief Undo a previous extension of the decision path for TreeSHAP*/
static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);
/*! determine what the total permuation weight would be if we unwound a previous extension in the decision path*/
/*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/
static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current levas*/
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
......@@ -379,7 +379,7 @@ class Tree {
std::vector<uint32_t> cat_threshold_inner_;
std::vector<int> cat_boundaries_;
std::vector<uint32_t> cat_threshold_;
/*! \brief Store the information for categorical feature handle and mising value handle. */
/*! \brief Store the information for categorical feature handle and missing value handle. */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<float> split_gain_;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment