% so4b.tex
\documentclass{article}
% Include macros here
\input{macros}
\usepackage{fancyhdr}
%\include{macros}
\usepackage{pifont}
% Number of problem sheet
\newcounter{problemSheetNumber}
\setcounter{problemSheetNumber}{4}
\newcommand{\matlabprob}{\ding{100} \ }
\newcommand{\examprob}{\ding{80} \ }
%\setcounter{section}{\theproblemSheetNumber}
%\renewcommand{\theparagraph}{(\thesection.\arabic{paragraph})}
\newcounter{problems}
\setcounter{problems}{3}
%\setlength{\parindent}{0cm}
\renewcommand{\problem}[1]{\paragraph{(\theproblemSheetNumber.\theproblems)}\addtocounter{problems}{1}\label{#1}}
\renewcommand{\solution}[1]{\paragraph{Solution (\theproblemSheetNumber.\theproblems)}\addtocounter{problems}{1}\label{#1}}
\pagestyle{fancy}
\lhead{MATH36061}
\chead{Convex Optimization}
\rhead{October 24, 2016}
\begin{document}
\begin{center}
{\Large {\bf Solutions to Part B of Problem Sheet \theproblemSheetNumber}}
\end{center}
\solution{pr:1}
\begin{itemize}
\item[(a)] Given complex numbers $z_1=a+ib$ and $z_2=c+id$, we can express the real and imaginary parts of the product $z_3=z_1z_2$ as
\begin{equation*}
\begin{pmatrix}
\mathrm{re}(z_3)\\
\mathrm{im}(z_3)
\end{pmatrix}=
\begin{pmatrix}
a & -b\\
b & a
\end{pmatrix}
\begin{pmatrix}
c \\ d
\end{pmatrix}.
\end{equation*}
In the same fashion, a system of equations $\mtx{A}\vct{x}=\vct{b}$, with $\mtx{A}$ and $\vct{x}$ complex, can be written as
\begin{equation*}
\begin{pmatrix}
\mathrm{re}(\vct{b})\\
\mathrm{im}(\vct{b})
\end{pmatrix}=
\begin{pmatrix}
\mathrm{re}(\mtx{A}) & -\mathrm{im}(\mtx{A})\\
\mathrm{im}(\mtx{A}) & \mathrm{re}(\mtx{A})
\end{pmatrix}
\begin{pmatrix}
\mathrm{re}(\vct{x})\\
\mathrm{im}(\vct{x})
\end{pmatrix}.
\end{equation*}
Since we know that the target vector $\vct{b}$ is real, we only need the upper half of this system. Once the real system is solved, we can assemble the complex solution $\vct{x}$ from its two halves.
\item[(b)+(c)] The code could look something like this:
\begin{ipythonnb}
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
import numpy.fft as fft
import matplotlib.pyplot as plt
import cvxpy as cvx
\end{ipythonnb}
\begin{ipythonnb}
def f(x):
    # Test signal: a superposition of five sinusoids (frequencies 30, 9, 6, 11, 13),
    # i.e. a signal that is sparse in the Fourier basis.
    # Parentheses make the line break a valid continuation; as printed before,
    # the return statement ended after the first line and the remaining two
    # terms were silently dropped.
    return (1.7*np.sin(30.*x) + 0.5*np.cos(9.*x) + 0.5*np.sin(6.*x)
            - np.cos(11.*x) + 0.2*np.sin(13.*x))
\end{ipythonnb}
\begin{ipythonnb}
n = 512                          # number of equispaced grid points on [0, 2*pi)
T = 2*np.pi/n                    # grid spacing
xx = np.linspace(0,2*np.pi-T,n)  # sample locations (right endpoint excluded)
yy = f(xx)                       # true signal values on the full grid
% matplotlib inline
plt.plot(xx,yy,linewidth=3)
plt.show()
\end{ipythonnb}
\includegraphics[width=0.5\textwidth]{images/41.png}
\begin{ipythonnb}
m = 30                           # number of random measurements (m << n)
p = rnd.permutation(n)           # random ordering of the n grid indices
points = xx[p[:m]]               # the m randomly chosen sample locations
samples = f(points)              # observed signal values at those locations
plt.plot(xx,yy,linewidth=2)
plt.plot(points,samples,'o', color='red')
plt.show()
\end{ipythonnb}
The red dots indicate the points that we see. We know nothing else about the signal!
\includegraphics[width=0.5\textwidth]{images/42.png}
We now show how to reconstruct the whole blue curve from the knowledge of the red dots alone. We do this by setting up an optimization problem of the form
\begin{equation*}
\minimize \|\vct{x}\|_1 \quad \subjto \mtx{A}\vct{x}=\vct{b}
\end{equation*}
for suitable matrix $\mtx{A}$ and vector $\vct{b}$. How $\mtx{A}$ and $\vct{b}$ are constructed is described in the problem description. Below is the implementation.
\begin{ipythonnb}
D = fft.ifft(np.eye(n))                        # n x n inverse DFT matrix (complex)
rD = np.concatenate((D.real, D.imag), axis=1)  # real n x 2n matrix acting on stacked (re, im) coefficients
A = rD[p[:m],:]                                # keep only the m rows that were sampled
fy = fft.fft(yy)                               # Fourier coefficients of the true signal
b = np.dot(A,np.concatenate((fy.real, fy.imag), axis=0))  # right-hand side (the m measurements)
\end{ipythonnb}
\begin{ipythonnb}
x = cvx.Variable(2*n)                  # stacked real and imaginary Fourier coefficients
constraints = [A*x == b]               # reproduce the m observed samples exactly
obj = cvx.Minimize(cvx.norm(x,1))      # l1-norm objective promotes sparsity
prob = cvx.Problem(obj, constraints)
prob.solve()
x = np.array(x.value).transpose()[0]   # extract the solver output as a flat numpy array
\end{ipythonnb}
\begin{ipythonnb}
# Reassemble the complex coefficients, invert the FFT, and compare with the truth.
newy_im = fft.ifft(x[:n]+1j*x[n:])
newy = newy_im.real
# print(...) with parentheses works under both Python 2 and Python 3;
# the bare print statement used before is Python-2-only.
print(la.norm(newy-yy,1))
plt.subplot(2,1,1)
plt.plot(xx,yy,linewidth=3)
plt.subplot(2,1,2)
plt.plot(xx,newy,linewidth=3)
plt.show()
\end{ipythonnb}
\includegraphics[width=0.8\textwidth]{images/43.png}
The error obtained is of order $10^{-7}$. Now the interesting question is: \textbf{how much undersampling can we get away with?} To find out, we can repeat the previous experiment with values of $m$ between $1$ and $512$ and find out where the method starts working. Obviously sampling only one point will not work (not enough information), and sampling all $512$ points will work (we have all the information). As we saw, $30$ points are already sufficient, but can we do with fewer?
\end{itemize}
\end{document}