Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dlib
Commits
f152a78a
"vscode:/vscode.git/clone" did not exist on "5ccd775165b5e221f040da94319ca50374d3b57f"
Commit
f152a78a
authored
Mar 28, 2021
by
Davis King
Browse files
updated docs
parent
a44ddd74
Changes
5
Show whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
107 additions
and
2 deletions
+107
-2
docs/docs/algorithms.xml
docs/docs/algorithms.xml
+28
-0
docs/docs/linear_algebra.xml
docs/docs/linear_algebra.xml
+8
-0
docs/docs/ml.xml
docs/docs/ml.xml
+28
-0
docs/docs/release_notes.xml
docs/docs/release_notes.xml
+34
-1
docs/docs/term_index.xml
docs/docs/term_index.xml
+9
-1
No files found.
docs/docs/algorithms.xml
View file @
f152a78a
...
@@ -76,6 +76,8 @@
...
@@ -76,6 +76,8 @@
<item>
count_steps_without_decrease_robust
</item>
<item>
count_steps_without_decrease_robust
</item>
<item>
count_steps_without_decrease
</item>
<item>
count_steps_without_decrease
</item>
<item>
count_steps_without_increase
</item>
<item>
count_steps_without_increase
</item>
<item>
probability_values_are_increasing
</item>
<item>
probability_values_are_increasing_robust
</item>
<item>
binomial_random_vars_are_different
</item>
<item>
binomial_random_vars_are_different
</item>
<item>
event_correlation
</item>
<item>
event_correlation
</item>
...
@@ -752,6 +754,32 @@
...
@@ -752,6 +754,32 @@
</description>
</description>
</component>
</component>
<!-- ************************************************************************* -->
<component>
<name>
probability_values_are_increasing
</name>
<file>
dlib/statistics/running_gradient.h
</file>
<spec_file
link=
"true"
>
dlib/statistics/running_gradient_abstract.h
</spec_file>
<description>
Given a potentially noisy time series, this function returns the probability that those
values are increasing in magnitude.
</description>
</component>
<!-- ************************************************************************* -->
<component>
<name>
probability_values_are_increasing_robust
</name>
<file>
dlib/statistics/running_gradient.h
</file>
<spec_file
link=
"true"
>
dlib/statistics/running_gradient_abstract.h
</spec_file>
<description>
This function behaves just like
<a
href=
"#probability_values_are_increasing"
>
probability_values_are_increasing
</a>
except
that it ignores time series values that are anomalously large. This makes it
robust to sudden noisy but transient spikes in the time series values.
</description>
</component>
<!-- ************************************************************************* -->
<!-- ************************************************************************* -->
<component>
<component>
...
...
docs/docs/linear_algebra.xml
View file @
f152a78a
...
@@ -163,6 +163,10 @@
...
@@ -163,6 +163,10 @@
<name>
tanh
</name>
<name>
tanh
</name>
<link>
dlib/matrix/matrix_math_functions_abstract.h.html#tanh
</link>
<link>
dlib/matrix/matrix_math_functions_abstract.h.html#tanh
</link>
</item>
</item>
<item>
<name>
soft_max
</name>
<link>
dlib/matrix/matrix_math_functions_abstract.h.html#soft_max
</link>
</item>
</sub>
</sub>
</item>
</item>
<item
nolink=
"true"
>
<item
nolink=
"true"
>
...
@@ -549,6 +553,10 @@
...
@@ -549,6 +553,10 @@
<name>
pointwise_multiply
</name>
<name>
pointwise_multiply
</name>
<link>
dlib/matrix/matrix_utilities_abstract.h.html#pointwise_multiply
</link>
<link>
dlib/matrix/matrix_utilities_abstract.h.html#pointwise_multiply
</link>
</item>
</item>
<item>
<name>
pointwise_pow
</name>
<link>
dlib/matrix/matrix_utilities_abstract.h.html#pointwise_pow
</link>
</item>
<item>
<item>
<name>
join_rows
</name>
<name>
join_rows
</name>
<link>
dlib/matrix/matrix_utilities_abstract.h.html#join_rows
</link>
<link>
dlib/matrix/matrix_utilities_abstract.h.html#join_rows
</link>
...
...
docs/docs/ml.xml
View file @
f152a78a
...
@@ -156,6 +156,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
...
@@ -156,6 +156,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>
scale
</name>
<name>
scale
</name>
<link>
dlib/dnn/layers_abstract.h.html#scale_
</link>
<link>
dlib/dnn/layers_abstract.h.html#scale_
</link>
</item>
</item>
<item>
<name>
scale_prev
</name>
<link>
dlib/dnn/layers_abstract.h.html#scale_prev_
</link>
</item>
<item>
<item>
<name>
extract
</name>
<name>
extract
</name>
<link>
dlib/dnn/layers_abstract.h.html#extract_
</link>
<link>
dlib/dnn/layers_abstract.h.html#extract_
</link>
...
@@ -180,6 +184,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
...
@@ -180,6 +184,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>
l2normalize
</name>
<name>
l2normalize
</name>
<link>
dlib/dnn/layers_abstract.h.html#l2normalize_
</link>
<link>
dlib/dnn/layers_abstract.h.html#l2normalize_
</link>
</item>
</item>
<item>
<name>
layer_norm
</name>
<link>
dlib/dnn/layers_abstract.h.html#layer_norm_
</link>
</item>
<item>
<item>
<name>
dropout
</name>
<name>
dropout
</name>
<link>
dlib/dnn/layers_abstract.h.html#dropout_
</link>
<link>
dlib/dnn/layers_abstract.h.html#dropout_
</link>
...
@@ -216,6 +224,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
...
@@ -216,6 +224,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>
relu
</name>
<name>
relu
</name>
<link>
dlib/dnn/layers_abstract.h.html#relu_
</link>
<link>
dlib/dnn/layers_abstract.h.html#relu_
</link>
</item>
</item>
<item>
<name>
gelu
</name>
<link>
dlib/dnn/layers_abstract.h.html#gelu_
</link>
</item>
<item>
<item>
<name>
concat
</name>
<name>
concat
</name>
<link>
dlib/dnn/layers_abstract.h.html#concat_
</link>
<link>
dlib/dnn/layers_abstract.h.html#concat_
</link>
...
@@ -325,6 +337,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
...
@@ -325,6 +337,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>
loss_mean_squared_per_channel_and_pixel
</name>
<name>
loss_mean_squared_per_channel_and_pixel
</name>
<link>
dlib/dnn/loss_abstract.h.html#loss_mean_squared_per_channel_and_pixel_
</link>
<link>
dlib/dnn/loss_abstract.h.html#loss_mean_squared_per_channel_and_pixel_
</link>
</item>
</item>
<item>
<name>
loss_multibinary_log
</name>
<link>
dlib/dnn/loss_abstract.h.html#loss_multibinary_log_
</link>
</item>
</sub>
</sub>
</item>
</item>
<item
nolink=
"true"
>
<item
nolink=
"true"
>
...
@@ -474,6 +490,7 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
...
@@ -474,6 +490,7 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>
Data IO
</name>
<name>
Data IO
</name>
<item>
load_image_dataset_metadata
</item>
<item>
load_image_dataset_metadata
</item>
<item>
load_image_dataset
</item>
<item>
load_image_dataset
</item>
<item>
load_cifar_10_dataset
</item>
<item>
save_image_dataset_metadata
</item>
<item>
save_image_dataset_metadata
</item>
<item>
load_libsvm_formatted_data
</item>
<item>
load_libsvm_formatted_data
</item>
<item>
save_libsvm_formatted_data
</item>
<item>
save_libsvm_formatted_data
</item>
...
@@ -2868,6 +2885,17 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
...
@@ -2868,6 +2885,17 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
</component>
</component>
<!-- ************************************************************************* -->
<component>
<name>
load_cifar_10_dataset
</name>
<file>
dlib/data_io.h
</file>
<spec_file
link=
"true"
>
dlib/data_io/cifar_abstract.h
</spec_file>
<description>
Loads the
<a
href=
"https://www.cs.toronto.edu/~kriz/cifar.html"
>
CIFAR-10
</a>
dataset from disk.
</description>
</component>
<!-- ************************************************************************* -->
<!-- ************************************************************************* -->
<component>
<component>
...
...
docs/docs/release_notes.xml
View file @
f152a78a
...
@@ -10,7 +10,40 @@
...
@@ -10,7 +10,40 @@
<!-- ************************************************************************************** -->
<!-- ************************************************************************************** -->
<current>
<current>
New Features and Improvements:
- Deep learning tooling:
- Added loss_multibinary_log_
- Added scale_prev layer
- Various ease of use improvements to the deep learning tooling, such as improved layer
visitors and increased DNN training stability.
- Added CUDA implementation for loss_multiclass_log_per_pixel_weighted.
- Add GELU activation layer
- Add Layer Normalization
- Add CIFAR-10 dataset loader: load_cifar_10_dataset()
- Add probability_values_are_increasing() and probability_values_are_increasing_robust().
- Expanded list of serializable types and added DLIB_DEFINE_DEFAULT_SERIALIZATION, a macro that
lets you make a class serializable with a single simple declaration.
- Added exponential and Weibull distributions to dlib::rand.
- For dlib::matrix:
- Added soft_max() and pointwise_pow()
- The FFT methods now support arbitrary sized FFTs and are more performant.
- Added user definable stopping condition support to find_min_global() and find_max_global().
Non-Backwards Compatible Changes:
- Rename POSIX macro to DLIB_POSIX to avoid name clashes with some libraries.
- Dropped support for gcc 4.8.
Bug fixes:
- Fixed bug in loss_mmod that degraded the quality of bounding box regression. Now
bounding box regression works a lot better.
- Fixes for code not compiling in various environments and support newer CUDA tooling.
</current>
<!-- ************************************************************************************** -->
<old
name=
"19.21"
date=
"Aug 08, 2020"
>
New Features and Improvements:
New Features and Improvements:
- Added support for cuDNN 8.0.
- Added support for cuDNN 8.0.
- Added support for CUDA in Python 3.8 on Windows.
- Added support for CUDA in Python 3.8 on Windows.
...
@@ -24,7 +57,7 @@ Bug fixes:
...
@@ -24,7 +57,7 @@ Bug fixes:
with CUDA enabled or who are using windows.
with CUDA enabled or who are using windows.
- Fix random forest regression not doing quite the right thing.
- Fix random forest regression not doing quite the right thing.
</
current
>
</
old
>
<!-- ************************************************************************************** -->
<!-- ************************************************************************************** -->
...
...
docs/docs/term_index.xml
View file @
f152a78a
...
@@ -138,7 +138,7 @@
...
@@ -138,7 +138,7 @@
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_multiclass_log_per_pixel_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_multiclass_log_per_pixel_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_multiclass_log_per_pixel_weighted_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_multiclass_log_per_pixel_weighted_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_mean_squared_per_channel_and_pixel_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_mean_squared_per_channel_and_pixel_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_mean_squared_per_channel_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_multibinary_log_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_ranking_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_ranking_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_dot_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_dot_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_epsilon_insensitive_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/loss_abstract.h.html"
name=
"loss_epsilon_insensitive_"
include=
"dlib/dnn.h"
/>
...
@@ -164,7 +164,9 @@
...
@@ -164,7 +164,9 @@
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"upsample_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"upsample_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"cont_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"cont_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"scale_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"scale_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"scale_prev_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"l2normalize_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"l2normalize_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"layer_norm_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"dropout_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"dropout_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"multiply_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"multiply_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"bn_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"bn_"
include=
"dlib/dnn.h"
/>
...
@@ -172,6 +174,7 @@
...
@@ -172,6 +174,7 @@
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"max_pool_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"max_pool_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"avg_pool_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"avg_pool_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"relu_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"relu_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"gelu_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"leaky_relu_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"leaky_relu_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"mish_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"mish_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"prelu_"
include=
"dlib/dnn.h"
/>
<term
file=
"dlib/dnn/layers_abstract.h.html"
name=
"prelu_"
include=
"dlib/dnn.h"
/>
...
@@ -460,6 +463,8 @@
...
@@ -460,6 +463,8 @@
<term
file=
"dlib/statistics/running_gradient_abstract.h.html"
name=
"probability_gradient_less_than"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"dlib/statistics/running_gradient_abstract.h.html"
name=
"probability_gradient_less_than"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"algorithms.html"
name=
"count_steps_without_decrease_robust"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"algorithms.html"
name=
"count_steps_without_decrease_robust"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"algorithms.html"
name=
"count_steps_without_decrease"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"algorithms.html"
name=
"count_steps_without_decrease"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"algorithms.html"
name=
"probability_values_are_increasing_robust"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"algorithms.html"
name=
"probability_values_are_increasing"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"algorithms.html"
name=
"count_steps_without_increase"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"algorithms.html"
name=
"count_steps_without_increase"
include=
"dlib/statistics/running_gradient.h"
/>
<term
file=
"algorithms.html"
name=
"running_scalar_covariance"
include=
"dlib/statistics.h"
/>
<term
file=
"algorithms.html"
name=
"running_scalar_covariance"
include=
"dlib/statistics.h"
/>
<term
file=
"algorithms.html"
name=
"mean_sign_agreement"
include=
"dlib/statistics.h"
/>
<term
file=
"algorithms.html"
name=
"mean_sign_agreement"
include=
"dlib/statistics.h"
/>
...
@@ -497,6 +502,7 @@
...
@@ -497,6 +502,7 @@
<term
link=
"ml.html#load_image_dataset_metadata"
name=
"image_dataset_metadata"
include=
"dlib/data_io.h"
/>
<term
link=
"ml.html#load_image_dataset_metadata"
name=
"image_dataset_metadata"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"load_image_dataset_metadata"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"load_image_dataset_metadata"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"load_image_dataset"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"load_image_dataset"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"load_cifar_10_dataset"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"save_image_dataset_metadata"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"save_image_dataset_metadata"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"load_libsvm_formatted_data"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"load_libsvm_formatted_data"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"save_libsvm_formatted_data"
include=
"dlib/data_io.h"
/>
<term
file=
"ml.html"
name=
"save_libsvm_formatted_data"
include=
"dlib/data_io.h"
/>
...
@@ -714,6 +720,7 @@
...
@@ -714,6 +720,7 @@
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"round_zeros"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"round_zeros"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"complex_matrix"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"complex_matrix"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"normalize"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"normalize"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"soft_max"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"abs"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"abs"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"acos"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_math_functions_abstract.h.html"
name=
"acos"
include=
"dlib/matrix.h"
/>
...
@@ -841,6 +848,7 @@
...
@@ -841,6 +848,7 @@
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"min_pointwise"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"min_pointwise"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"max_pointwise"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"max_pointwise"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"pointwise_multiply"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"pointwise_multiply"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"pointwise_pow"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"join_rows"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"join_rows"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"join_cols"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"join_cols"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"equal"
include=
"dlib/matrix.h"
/>
<term
file=
"dlib/matrix/matrix_utilities_abstract.h.html"
name=
"equal"
include=
"dlib/matrix.h"
/>
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment