diff --git a/docs/docs/algorithms.xml b/docs/docs/algorithms.xml
index 82c58f94a1..56fc0aa0c0 100644
--- a/docs/docs/algorithms.xml
+++ b/docs/docs/algorithms.xml
@@ -76,6 +76,8 @@
count_steps_without_decrease_robustcount_steps_without_decreasecount_steps_without_increase
+ probability_values_are_increasing
+ probability_values_are_increasing_robustbinomial_random_vars_are_differentevent_correlation
@@ -752,6 +754,32 @@
+
+
+
+ probability_values_are_increasing
+ dlib/statistics/running_gradient.h
+ dlib/statistics/running_gradient_abstract.h
+
+ Given a potentially noisy time series, this function returns the probability that those
+ values are increasing in magnitude.
+
+
+
+
+
+
+ probability_values_are_increasing_robust
+ dlib/statistics/running_gradient.h
+ dlib/statistics/running_gradient_abstract.h
+
+ This function behaves just like probability_values_are_increasing except
+ that it ignores time series values that are anomalously large. This makes it
+ robust to sudden noisy but transient spikes in the time series values.
+
+
+
diff --git a/docs/docs/linear_algebra.xml b/docs/docs/linear_algebra.xml
index 2e0ec474c7..45ee0bace0 100644
--- a/docs/docs/linear_algebra.xml
+++ b/docs/docs/linear_algebra.xml
@@ -163,6 +163,10 @@
tanh
dlib/matrix/matrix_math_functions_abstract.h.html#tanh
+
+ soft_max
+ dlib/matrix/matrix_math_functions_abstract.h.html#soft_max
+
@@ -549,6 +553,10 @@
pointwise_multiply
dlib/matrix/matrix_utilities_abstract.h.html#pointwise_multiply
+
+ pointwise_pow
+ dlib/matrix/matrix_utilities_abstract.h.html#pointwise_pow
+ join_rows
dlib/matrix/matrix_utilities_abstract.h.html#join_rows
diff --git a/docs/docs/ml.xml b/docs/docs/ml.xml
index cf6f623794..9c4e768b4c 100644
--- a/docs/docs/ml.xml
+++ b/docs/docs/ml.xml
@@ -156,6 +156,10 @@ Davis E. King. l2normalize
dlib/dnn/layers_abstract.h.html#l2normalize_
+
+ layer_norm
+ dlib/dnn/layers_abstract.h.html#layer_norm_
+ dropout
dlib/dnn/layers_abstract.h.html#dropout_
@@ -216,6 +224,10 @@ Davis E. King. loss_mean_squared_per_channel_and_pixel
dlib/dnn/loss_abstract.h.html#loss_mean_squared_per_channel_and_pixel_
+
+ loss_multibinary_log
+ dlib/dnn/loss_abstract.h.html#loss_multibinary_log_
+
@@ -474,6 +490,7 @@ Davis E. King.
+
+
+
+ load_cifar_10_dataset
+ dlib/data_io.h
+ dlib/data_io/cifar_abstract.h
+
+ Loads the CIFAR-10 dataset from disk.
+
+
+
diff --git a/docs/docs/release_notes.xml b/docs/docs/release_notes.xml
index bd7f08f0b7..5e0d7d88b0 100644
--- a/docs/docs/release_notes.xml
+++ b/docs/docs/release_notes.xml
@@ -10,7 +10,40 @@
+
+New Features and Improvements:
+ - Deep learning tooling:
+ - Added loss_multibinary_log_
+ - Added scale_prev layer
+ - Various ease of use improvements to the deep learning tooling, such as improved layer
+ visitors and increased DNN training stability.
+ - Added CUDA implementation for loss_multiclass_log_per_pixel_weighted.
+ - Added GELU activation layer
+ - Added Layer Normalization
+ - Added CIFAR-10 dataset loader: load_cifar_10_dataset()
+ - Added probability_values_are_increasing() and probability_values_are_increasing_robust().
+ - Expanded list of serializable types and added DLIB_DEFINE_DEFAULT_SERIALIZATION, a macro that
+ lets you make a class serializable with a single simple declaration.
+ - Added exponential and Weibull distributions to dlib::rand.
+ - For dlib::matrix:
+ - Added soft_max() and pointwise_pow()
+ - The FFT methods now support arbitrary sized FFTs and are more performant.
+ - Added user-definable stopping condition support to find_min_global() and find_max_global().
+
+Non-Backwards Compatible Changes:
+ - Renamed the POSIX macro to DLIB_POSIX to avoid name clashes with some libraries.
+ - Dropped support for gcc 4.8.
+
+Bug fixes:
+ - Fixed bug in loss_mmod that degraded the quality of bounding box regression. Now
+ bounding box regression works a lot better.
+ - Fixes for code not compiling in various environments and support for newer CUDA tooling.
+
+
+
+
+
New Features and Improvements:
- Added support for cuDNN 8.0.
- Added support for CUDA in Python 3.8 on Windows.
@@ -24,7 +57,7 @@ Bug fixes:
with CUDA enabled or who are using windows.
- Fix random forest regression not doing quite the right thing.
-
+
diff --git a/docs/docs/term_index.xml b/docs/docs/term_index.xml
index fce311bd13..e6dd721e5c 100644
--- a/docs/docs/term_index.xml
+++ b/docs/docs/term_index.xml
@@ -138,7 +138,7 @@
-
+
@@ -164,7 +164,9 @@
+
+
@@ -172,6 +174,7 @@
+
@@ -460,6 +463,8 @@
+
+
@@ -497,6 +502,7 @@
+
@@ -714,6 +720,7 @@
+
@@ -841,6 +848,7 @@
+