Skip to content

Commit

Permalink
updated docs
Browse files Browse the repository at this point in the history
  • Loading branch information
davisking committed Mar 28, 2021
1 parent a44ddd7 commit f152a78
Show file tree
Hide file tree
Showing 5 changed files with 107 additions and 2 deletions.
28 changes: 28 additions & 0 deletions docs/docs/algorithms.xml
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,8 @@
<item>count_steps_without_decrease_robust</item>
<item>count_steps_without_decrease</item>
<item>count_steps_without_increase</item>
<item>probability_values_are_increasing</item>
<item>probability_values_are_increasing_robust</item>

<item>binomial_random_vars_are_different</item>
<item>event_correlation</item>
Expand Down Expand Up @@ -752,6 +754,32 @@
</description>
</component>

<!-- ************************************************************************* -->

<component>
<name>probability_values_are_increasing</name>
<file>dlib/statistics/running_gradient.h</file>
<spec_file link="true">dlib/statistics/running_gradient_abstract.h</spec_file>
<description>
Given a potentially noisy time series, this function returns the probability that those
values are increasing in magnitude.
</description>
</component>

<!-- ************************************************************************* -->

<component>
<name>probability_values_are_increasing_robust</name>
<file>dlib/statistics/running_gradient.h</file>
<spec_file link="true">dlib/statistics/running_gradient_abstract.h</spec_file>
<description>
This function behaves just like <a
href="#probability_values_are_increasing">probability_values_are_increasing</a> except
that it ignores time series values that are anomalously large. This makes it
robust to sudden noisy but transient spikes in the time series values.
</description>
</component>

<!-- ************************************************************************* -->

<component>
Expand Down
8 changes: 8 additions & 0 deletions docs/docs/linear_algebra.xml
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,10 @@
<name>tanh</name>
<link>dlib/matrix/matrix_math_functions_abstract.h.html#tanh</link>
</item>
<item>
<name>soft_max</name>
<link>dlib/matrix/matrix_math_functions_abstract.h.html#soft_max</link>
</item>
</sub>
</item>
<item nolink="true">
Expand Down Expand Up @@ -549,6 +553,10 @@
<name>pointwise_multiply</name>
<link>dlib/matrix/matrix_utilities_abstract.h.html#pointwise_multiply</link>
</item>
<item>
<name>pointwise_pow</name>
<link>dlib/matrix/matrix_utilities_abstract.h.html#pointwise_pow</link>
</item>
<item>
<name>join_rows</name>
<link>dlib/matrix/matrix_utilities_abstract.h.html#join_rows</link>
Expand Down
28 changes: 28 additions & 0 deletions docs/docs/ml.xml
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>scale</name>
<link>dlib/dnn/layers_abstract.h.html#scale_</link>
</item>
<item>
<name>scale_prev</name>
<link>dlib/dnn/layers_abstract.h.html#scale_prev_</link>
</item>
<item>
<name>extract</name>
<link>dlib/dnn/layers_abstract.h.html#extract_</link>
Expand All @@ -180,6 +184,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>l2normalize</name>
<link>dlib/dnn/layers_abstract.h.html#l2normalize_</link>
</item>
<item>
<name>layer_norm</name>
<link>dlib/dnn/layers_abstract.h.html#layer_norm_</link>
</item>
<item>
<name>dropout</name>
<link>dlib/dnn/layers_abstract.h.html#dropout_</link>
Expand Down Expand Up @@ -216,6 +224,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>relu</name>
<link>dlib/dnn/layers_abstract.h.html#relu_</link>
</item>
<item>
<name>gelu</name>
<link>dlib/dnn/layers_abstract.h.html#gelu_</link>
</item>
<item>
<name>concat</name>
<link>dlib/dnn/layers_abstract.h.html#concat_</link>
Expand Down Expand Up @@ -325,6 +337,10 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>loss_mean_squared_per_channel_and_pixel</name>
<link>dlib/dnn/loss_abstract.h.html#loss_mean_squared_per_channel_and_pixel_</link>
</item>
<item>
<name>loss_multibinary_log</name>
<link>dlib/dnn/loss_abstract.h.html#loss_multibinary_log_</link>
</item>
</sub>
</item>
<item nolink="true">
Expand Down Expand Up @@ -474,6 +490,7 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09
<name>Data IO</name>
<item>load_image_dataset_metadata</item>
<item>load_image_dataset</item>
<item>load_cifar_10_dataset</item>
<item>save_image_dataset_metadata</item>
<item>load_libsvm_formatted_data</item>
<item>save_libsvm_formatted_data</item>
Expand Down Expand Up @@ -2868,6 +2885,17 @@ Davis E. King. <a href="http://jmlr.csail.mit.edu/papers/volume10/king09a/king09

</component>

<!-- ************************************************************************* -->

<component>
<name>load_cifar_10_dataset</name>
<file>dlib/data_io.h</file>
<spec_file link="true">dlib/data_io/cifar_abstract.h</spec_file>
<description>
            Loads the <a href="https://www.cs.toronto.edu/~kriz/cifar.html">CIFAR-10</a> dataset from disk.
</description>
</component>

<!-- ************************************************************************* -->

<component>
Expand Down
35 changes: 34 additions & 1 deletion docs/docs/release_notes.xml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,40 @@

<!-- ************************************************************************************** -->


<current>
New Features and Improvements:
- Deep learning tooling:
- Added loss_multibinary_log_
- Added scale_prev layer
- Various ease of use improvements to the deep learning tooling, such as improved layer
visitors and increased DNN training stability.
- Added CUDA implementation for loss_multiclass_log_per_pixel_weighted.
   - Added GELU activation layer
   - Added Layer Normalization
   - Added CIFAR-10 dataset loader: load_cifar_10_dataset()
   - Added probability_values_are_increasing() and probability_values_are_increasing_robust().
- Expanded list of serializable types and added DLIB_DEFINE_DEFAULT_SERIALIZATION, a macro that
lets you make a class serializable with a single simple declaration.
- Added exponential and Weibull distributions to dlib::rand.
- For dlib::matrix:
- Added soft_max() and pointwise_pow()
- The FFT methods now support arbitrary sized FFTs and are more performant.
- Added user definable stopping condition support to find_min_global() and find_max_global().

Non-Backwards Compatible Changes:
  - Renamed POSIX macro to DLIB_POSIX to avoid name clashes with some libraries.
- Dropped support for gcc 4.8.

Bug fixes:
- Fixed bug in loss_mmod that degraded the quality of bounding box regression. Now
bounding box regression works a lot better.
  - Fixes for code not compiling in various environments and support for newer CUDA tooling.
</current>

<!-- ************************************************************************************** -->

<old name="19.21" date="Aug 08, 2020">
New Features and Improvements:
- Added support for cuDNN 8.0.
- Added support for CUDA in Python 3.8 on Windows.
Expand All @@ -24,7 +57,7 @@ Bug fixes:
with CUDA enabled or who are using windows.
- Fix random forest regression not doing quite the right thing.

</current>
</old>

<!-- ************************************************************************************** -->

Expand Down
10 changes: 9 additions & 1 deletion docs/docs/term_index.xml
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@
<term file="dlib/dnn/loss_abstract.h.html" name="loss_multiclass_log_per_pixel_" include="dlib/dnn.h"/>
<term file="dlib/dnn/loss_abstract.h.html" name="loss_multiclass_log_per_pixel_weighted_" include="dlib/dnn.h"/>
<term file="dlib/dnn/loss_abstract.h.html" name="loss_mean_squared_per_channel_and_pixel_" include="dlib/dnn.h"/>
<term file="dlib/dnn/loss_abstract.h.html" name="loss_mean_squared_per_channel_" include="dlib/dnn.h"/>
<term file="dlib/dnn/loss_abstract.h.html" name="loss_multibinary_log_" include="dlib/dnn.h"/>
<term file="dlib/dnn/loss_abstract.h.html" name="loss_ranking_" include="dlib/dnn.h"/>
<term file="dlib/dnn/loss_abstract.h.html" name="loss_dot_" include="dlib/dnn.h"/>
<term file="dlib/dnn/loss_abstract.h.html" name="loss_epsilon_insensitive_" include="dlib/dnn.h"/>
Expand All @@ -164,14 +164,17 @@
<term file="dlib/dnn/layers_abstract.h.html" name="upsample_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="cont_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="scale_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="scale_prev_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="l2normalize_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="layer_norm_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="dropout_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="multiply_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="bn_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="affine_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="max_pool_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="avg_pool_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="relu_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="gelu_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="leaky_relu_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="mish_" include="dlib/dnn.h"/>
<term file="dlib/dnn/layers_abstract.h.html" name="prelu_" include="dlib/dnn.h"/>
Expand Down Expand Up @@ -460,6 +463,8 @@
<term file="dlib/statistics/running_gradient_abstract.h.html" name="probability_gradient_less_than" include="dlib/statistics/running_gradient.h"/>
<term file="algorithms.html" name="count_steps_without_decrease_robust" include="dlib/statistics/running_gradient.h"/>
<term file="algorithms.html" name="count_steps_without_decrease" include="dlib/statistics/running_gradient.h"/>
<term file="algorithms.html" name="probability_values_are_increasing_robust" include="dlib/statistics/running_gradient.h"/>
<term file="algorithms.html" name="probability_values_are_increasing" include="dlib/statistics/running_gradient.h"/>
<term file="algorithms.html" name="count_steps_without_increase" include="dlib/statistics/running_gradient.h"/>
<term file="algorithms.html" name="running_scalar_covariance" include="dlib/statistics.h"/>
<term file="algorithms.html" name="mean_sign_agreement" include="dlib/statistics.h"/>
Expand Down Expand Up @@ -497,6 +502,7 @@
<term link="ml.html#load_image_dataset_metadata" name="image_dataset_metadata" include="dlib/data_io.h"/>
<term file="ml.html" name="load_image_dataset_metadata" include="dlib/data_io.h"/>
<term file="ml.html" name="load_image_dataset" include="dlib/data_io.h"/>
<term file="ml.html" name="load_cifar_10_dataset" include="dlib/data_io.h"/>
<term file="ml.html" name="save_image_dataset_metadata" include="dlib/data_io.h"/>
<term file="ml.html" name="load_libsvm_formatted_data" include="dlib/data_io.h"/>
<term file="ml.html" name="save_libsvm_formatted_data" include="dlib/data_io.h"/>
Expand Down Expand Up @@ -714,6 +720,7 @@
<term file="dlib/matrix/matrix_math_functions_abstract.h.html" name="round_zeros" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_math_functions_abstract.h.html" name="complex_matrix" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_math_functions_abstract.h.html" name="normalize" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_math_functions_abstract.h.html" name="soft_max" include="dlib/matrix.h"/>

<term file="dlib/matrix/matrix_math_functions_abstract.h.html" name="abs" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_math_functions_abstract.h.html" name="acos" include="dlib/matrix.h"/>
Expand Down Expand Up @@ -841,6 +848,7 @@
<term file="dlib/matrix/matrix_utilities_abstract.h.html" name="min_pointwise" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_utilities_abstract.h.html" name="max_pointwise" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_utilities_abstract.h.html" name="pointwise_multiply" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_utilities_abstract.h.html" name="pointwise_pow" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_utilities_abstract.h.html" name="join_rows" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_utilities_abstract.h.html" name="join_cols" include="dlib/matrix.h"/>
<term file="dlib/matrix/matrix_utilities_abstract.h.html" name="equal" include="dlib/matrix.h"/>
Expand Down

0 comments on commit f152a78

Please sign in to comment.