/
SolveLsTvAdmm.m
124 lines (98 loc) · 4.11 KB
/
SolveLsTvAdmm.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
function [ vX, mX ] = SolveLsTvAdmm( vX, mA, vB, mD, paramLambda, numIterations )
% ----------------------------------------------------------------------------------------------- %
%[ vX, mX ] = SolveLsTvAdmm( vX, mA, vB, mD, paramLambda, numIterations )
% Solve TV Regularized Least Squares Using Alternating Direction Method of
% Multipliers (ADMM) Method. Basically solves the problem given by:
% $$ \arg \min_{ x \in \mathbb{R}^{n} } \frac{1}{2} {\left\| A x - b \right\|}_{2}^{2} + \lambda {\left\| D x \right\|}_{1} $$
% Input:
%   - vX            - Input Vector.
%                     Initialization of the iterative process. If empty, the
%                     least squares solution pinv(mA) * vB is used.
%                     Structure: Vector (n X 1) or empty.
%                     Type: 'Single' / 'Double'.
%                     Range: (-inf, inf).
%   - mA            - Input Matrix.
%                     The model matrix.
%                     Structure: Matrix (m X n).
%                     Type: 'Single' / 'Double'.
%                     Range: (-inf, inf).
%   - vB            - Input Vector.
%                     The model known data.
%                     Structure: Vector (m X 1).
%                     Type: 'Single' / 'Double'.
%                     Range: (-inf, inf).
%   - mD            - Regularization Operator Matrix.
%                     The operator applied inside the L1 norm (e.g. a finite
%                     differences matrix for Total Variation).
%                     Structure: Matrix (p X n).
%                     Type: 'Single' / 'Double'.
%                     Range: (-inf, inf).
%   - paramLambda   - Parameter Lambda.
%                     The L1 Regularization parameter.
%                     Structure: Scalar.
%                     Type: 'Single' / 'Double'.
%                     Range: (0, inf).
%   - numIterations - Number of Iterations.
%                     Number of iterations of the algorithm.
%                     Structure: Scalar.
%                     Type: 'Single' / 'Double'.
%                     Range {1, 2, ...}.
% Output:
%   - vX            - Output Vector.
%                     The solution estimate after the last iteration.
%                     Structure: Vector (n X 1).
%                     Type: 'Single' / 'Double'.
%                     Range: (-inf, inf).
%   - mX            - Iterates Matrix.
%                     The ii-th column holds the estimate of x at iteration
%                     ii (column 1 is the initialization).
%                     Structure: Matrix (n X numIterations).
%                     Type: 'Single' / 'Double'.
%                     Range: (-inf, inf).
% References
%   1.  Wikipedia ADMM - https://en.wikipedia.org/wiki/Augmented_Lagrangian_method#Alternating_direction_method_of_multipliers.
%   2.  S. Boyd, N. Parikh, E. Chu, B. Peleato, and J. Eckstein - Distributed
%       Optimization and Statistical Learning via the Alternating Direction
%       Method of Multipliers.
% Remarks:
%   1.  Using vanilla ADMM with no optimization of the parameter or
%       smoothing.
%   2.  The linear system matrix (mA.' * mA + paramRho * mD.' * mD) is fixed
%       over the iterations, hence its factorization is cached once via
%       MATLAB's 'decomposition' object and reused in every x-update.
% Known Issues:
%   1.  None.
% TODO:
%   1.  Add a stopping criterion based on the primal / dual residuals.
% Release Notes:
%   -   1.0.001     27/11/2019  Royi Avital
%       *   Removed unused matrix-shape classification code.
%   -   1.0.000     27/11/2019  Royi Avital
%       *   First release version.
% ----------------------------------------------------------------------------------------------- %

% Cache mA.' * vB, it is reused in every x-update.
vAb = mA.' * vB;

if(isempty(vX))
    vX = pinv(mA) * vB; %<! Least squares initialization (handles "Fat Matrix" as well)
end

paramRho = 5; %<! ADMM penalty parameter (fixed, not tuned per problem)

% The system matrix is constant, factorize once (Cholesky, it is SPD for
% paramRho > 0 when [mA; mD] has full column rank) and reuse each iteration.
mC = decomposition((mA.' * mA) + paramRho * (mD.' * mD), 'chol');

% Initialize the splitting variable z and the scaled dual variable u.
vZ = ProxL1(mD * vX, paramLambda / paramRho);
vU = mD * vX - vZ;

mX = zeros([size(vX, 1), numIterations]);
mX(:, 1) = vX;

for ii = 2:numIterations
    % x-update: solve the regularized least squares linear system.
    vX = mC \ (vAb + (paramRho * mD.' * (vZ - vU)));
    % z-update: prox of the (scaled) L1 norm.
    vZ = ProxL1(mD * vX + vU, paramLambda / paramRho);
    % u-update: gradient ascent on the scaled dual variable.
    vU = vU + mD * vX - vZ;

    mX(:, ii) = vX;
end


end
function [ vX ] = ProxL1( vX, lambdaFactor )
% Proximal operator of the (scaled) L1 norm - element wise Soft Thresholding.
% Shrinks each element of vX towards zero by lambdaFactor, zeroing elements
% whose magnitude is below lambdaFactor.
vShrinkPos = max(vX - lambdaFactor, 0); %<! Active for elements above the threshold
vShrinkNeg = min(vX + lambdaFactor, 0); %<! Active for elements below the negative threshold
vX = vShrinkPos + vShrinkNeg; %<! At most one term is non zero per element


end
function [ mL, mU ] = MatrixFactorize( mA, paramRho, matType )
% Cached Cholesky factorization for the ADMM x-update (Boyd et al., Sec. 4.2).
% Skinny case (m >= n): factorizes mA.' * mA + paramRho * I (n x n).
% Fat case (m < n): factorizes I + (1 / paramRho) * mA * mA.' (m x m), to be
% used with the Matrix Inversion Lemma.
% Returns the lower triangular factor mL and its transpose mU (mL * mU = M).
MAT_TYPE_SKINNY = 1;
MAT_TYPE_FAT    = 2;

if(matType == MAT_TYPE_SKINNY)
    matDim = size(mA, 2); %<! Work in the column space (n x n system)
    mL = chol((mA.' * mA) + (paramRho * eye(matDim)), 'lower');
elseif(matType == MAT_TYPE_FAT)
    matDim = size(mA, 1); %<! Work in the row space (m x m system)
    mL = chol(eye(matDim) + ((1 / paramRho) * (mA * mA.')), 'lower');
end

mU = mL.'; %<! Upper factor is the transpose of the lower Cholesky factor


end