% m458.tex
\documentclass[11pt]{scrartcl}
\usepackage[margin=2cm]{geometry}
\usepackage{amsmath}
\usepackage{fancyhdr}
\usepackage{amsfonts}
\usepackage{amssymb,amsmath,amsthm}
\usepackage{xcolor}
\usepackage{enumitem}
\newcommand{\R}[0]{\mathbb{R}}
\addtokomafont{section}{\rmfamily\centering\scshape}
% math environments
\usepackage[utf8]{inputenc}
\theoremstyle{definition}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}{Corollary}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}{Definition}
\newtheorem{prop}{Proposition}
\newtheorem{ex}{Example}
\theoremstyle{remark}
\newtheorem*{remark}{Remark}
\usepackage{hyperref}
\hypersetup{
colorlinks,
citecolor=black,
filecolor=black,
linkcolor=black,
urlcolor=black
}
% definition
\newcommand{\dfn}[1]{\textbf{\underline{#1}}}
\newcommand{\dist}[0]{\mathcal{F}}
\newcommand{\pr}[1]{\mathbb{P}[#1]}
\newcommand{\stat}[0]{T(X_1, ..., X_n )}
% converge in probability
\newcommand{\cvp}[0]{\overset{p}{\to}}
% sample mean
\newcommand{\smean}[0]{\frac{1}{n} \sum_{i=1}^n x_i}
% sample variance
\newcommand{\svar}[0]{\frac{1}{(n-1)} \sum_{i=1}^n (x_i - \overline{x})^2}
% expected value
\newcommand{\EX}[1]{\mathbb{E}\left[#1 \right]}
\newcommand{\EXth}[1]{\mathbb{E}_\theta \left[ #1 \right]}
% integral
\newcommand{\idx}[2]{\int_{#1}^{#2}}
% vector
\newcommand{\vect}[1]{\mathbf{#1}}
\title{\textbf{Math 458: Differential Geometry}}
\author{Shereen Elaidi}
\date{Winter 2020 Term}
\begin{document}
\begin{center}
\textbf{Math 458: Differential Geometry} \\
\textbf{Midterm Date: 13 March 2020 11.30 - 13.00} \\
\textbf{Key Results, Theorems, Definitions, etc.} \\
\textbf{Shereen Elaidi}
\end{center}
\tableofcontents
\pagestyle{fancy}
\lhead{Math 458: Differential Geometry}
\chead{Winter 2020 -- Summary}
\rhead{Page \thepage}
\lfoot{}
\cfoot{}
\rfoot{}
\renewcommand{\headrulewidth}{0.4pt}
\renewcommand{\footrulewidth}{0.4pt}
\setlength{\tabcolsep}{0.5em} % for the horizontal padding
{\renewcommand{\arraystretch}{1.2}% for the vertical padding
\section{Introduction}
This course is about differential geometry of curves, surfaces, and manifolds in $\R^3$ + integration with differential forms.
\subsection{Dual Spaces}
I am including this since I did not learn about dual spaces in my linear algebra class.
\begin{definition}[Linear Functional]
Let $V$ be a vector space over $K$. A map $\phi: V \rightarrow K$ is a \dfn{linear functional} if $\forall$ $v , u \in V$, $a, b \in K$:
\begin{align}
\phi(au+ bv) = a \phi(u) + b \phi(v)
\end{align}
\end{definition}
Examples of linear functionals:
\begin{enumerate}[noitemsep]
\item Let $V$ be the vector space of polynomials in $t$ over $\R$. Define the definite integral operator $J(p(t)):= \idx{0}{1} p(t)dt$. By the linearity of integration, this is a linear functional on $V$.
\item Let $V$ be the vector space of $n \times n$ matrices with real coefficients. Then, define the trace map: $T: V \rightarrow \R$ as the trace of a matrix $A$. This is a linear functional on $V$.
\end{enumerate}
\begin{definition}[Dual Space]
Let $V$ be a vector space over a field $K$. Then, the set of all linear functionals on $V$ over $K$ is a vector space over $K$ with addition and scalar multiplication defined by:
\begin{align*}
& (\phi + \sigma)(v) := \phi(v) + \sigma(v) \\
& (k \phi)(v) = k \phi(v)
\end{align*}
This vector space is called the \dfn{dual space} of $V$, denoted by $V^*$.
\end{definition}
\begin{ex}
Consider $V = K^n$. This is the vector space of all $n$-tuples, written as column vectors. Then, $V^*$ can be thought of as the space of all row vectors. We can represent any linear functional $\phi = (a_1, ..., a_n) \in V^*$ as a \dfn{linear form}:
\begin{align*}
\phi(x_1, ..., x_n) = \begin{bmatrix}
a_1 & a_2 & \cdots & a_n
\end{bmatrix} \begin{bmatrix}
x_1 & x_2 & \cdots & x_n
\end{bmatrix}^t = a_1 x_1 + ... + a_n x_n
\end{align*}
\end{ex}
When you choose a basis for a vector space $V$, you obtain an induced basis on the dual $V^*$:
\begin{theorem}
Suppose $\{ v_1, ..., v_n \}$ is a basis of $V$ over $K$. Let $\phi_1 , .., \phi_n \in V^*$ be linear functionals defined by:
\begin{align}
\phi_i(v_j) := \delta_{ij}
\end{align}
Then, $\{ \phi_1, ..., \phi_n \}$ is a basis of $V^*$. This basis is called the \dfn{dual basis}.
\end{theorem}
Theorems giving the relationships between bases and their dual bases:
\begin{theorem}
Let $\{ v_1, ..., v_n \}$ be a basis of $V$; let $\{ \phi_1 , ..., \phi_n \}$ be the dual basis in $V^*$. Then:
\begin{enumerate}[noitemsep]
\item $\forall u \in V$, $u = \phi_1(u)v_1 + ... + \phi_n(u) v_n$
\item For any linear functional $\sigma \in V^*$, $\sigma = \sigma(v_1) \phi_1 + ... + \sigma(v_n) \phi_n$.
\end{enumerate}
\end{theorem}
The change of basis on a vector space induces a change of basis on its dual. This is the point of the following theorem:
\begin{theorem}
Let $\{ v_1, ..., v_n \}$ and $\{ w_1, ..., w_n \}$ be bases of $V$ and let $\{ \phi_1, ... , \phi_n \}$ and $\{ \sigma_1, ..., \sigma_n \}$ be bases of $V^*$, dual to $\{ v_i \}$ and $\{ w_i \}$, respectively. If $P$ is the change of basis matrix from $\{ v_i \}$ to $\{w_i \}$, then $(P^{-1})^t$ is the change of basis matrix from $\{ \phi_i \}$ to $\{ \sigma_i \}$.
\end{theorem}
\begin{theorem}
If $V$ is a \emph{finite-dimensional} vector space, then $V \cong V^{**}$.
\end{theorem}
The following definition-theorem would have been very useful for the first homework :-)
\begin{definition}[Transpose of a Linear Mapping]
Let $U$, $V$ be vector spaces over $K$. Let $T: V \rightarrow U$ be an arbitrary linear mapping. Let $\phi \in U^*$ be a linear functional. Since linearity is stable under compositions, the composition map $\phi \circ T$ is a linear map $V \rightarrow K$, and this $(\phi \circ T) \in V^*$. Define the following map from $U^* \rightarrow V^*$:
\begin{align*}
\phi \mapsto \phi \circ T
\end{align*}
This map as defined is called the \dfn{transpose of T}. Formally: for each $v \in V$, the transpose map gives us:
\begin{align}
(T^t(\phi))(v) = \phi(T(v))
\end{align}
\end{definition}
\begin{theorem}
The transpose mapping $T^t$ is linear.
\end{theorem}
\begin{theorem}
Let $T: V \rightarrow U$ be linear. Let $A$ be the matrix representation of $T$ with respect to the bases $\{v_i \}$ of $V$ and $\{ u_i \}$ of $U$. Then, the transpose matrix $A^t$ is the matrix representation of $T^t : U^* \rightarrow V^*$ relative to the bases dual to $\{ u_i \}$ and $\{ v_i \}$.
\end{theorem}
\subsection{Notions from Multivariable Calculus}
\begin{definition}[Differential]
The \dfn{differential} of a map $f: \R^m \rightarrow \R^n$ at the point $p \in \R^m$ is the best linear approximation of the map at the point $p$:
\begin{align}
f(q) = f(p) + Df(p) \cdot (q-p) + o(||q-p||)
\end{align}
\end{align}
Here, $Df(p)$ is the differential, which is an $n \times m$ matrix.
\end{definition}
\begin{theorem}[Inverse Function Theorem]
Let $f: \R^n \rightarrow \R^n$ be continuously differentiable in an open set containing $a$ and det$f'(a) \neq 0$. Then, there is an open set $V$ containing $a$ and an open set $W$ containing $f(a)$ such that $f: V \rightarrow W$ has a continuous inverse $f^{-1}: W \rightarrow V$ which is differentiable and $\forall y \in W$ satisfies:
\begin{align}
(f^{-1})'(y) = [ f'(f^{-1}(y))]^{-1}
\end{align}
\end{theorem}
\begin{theorem}[Implicit Function Theorem]
Let $f: \R^n \times \R^m \rightarrow \R^m$ be a continuously differentiable function in an open set containing $(a,b)$ and $f(a,b) =0$. Let $M$ be the $m \times m$ matrix:
\begin{align*}
(D_{n+j}f^i (a,b))
\end{align*}
with $1 \leq i, j \leq m$. If det$(M) \neq 0 $, then there exists an open set $A \subseteq \R^n$ containing $a$ and an open set $B \subseteq \R^m$ containing $b$ with the following property: $\forall$ $ x \in A$, $\exists_1$ $g(x) \in B$ such that $f(x, g(x) ) =0$. Moreover, the function $g$ is differentiable.
\end{theorem}
\begin{definition}[Line Integral]
Let $\Omega \subseteq \R^n$ be open. Let $F$ be a smooth vector field. Let $\gamma: [a,b] \rightarrow \Omega$ be an oriented curve. Then, the \dfn{line integral} of $F$ over $\gamma$ is defined as:
\begin{align*}
\idx{\gamma}{} F \cdot d\gamma := \idx{a}{b} F(\gamma(t)) \cdot \gamma'(t) dt
\end{align*}
\end{definition}
\begin{definition}[Two-Dimensional Curl]
Let $F$ be a smooth vector field. Then, the two-dimensional \dfn{curl} is defined as:
\begin{align*}
\text{curl} (F) := \partial_x F_y - \partial_y F_x
\end{align*}
\end{definition}
\begin{definition}[Unit Normal Vector of a Parameterised Surface]
Let $\mathbb{X}: K \subseteq \R^2 \rightarrow \R^3$ be a parameterisation. Then, the \dfn{unit normal vectors} are:
\begin{align*}
n := \pm \frac{\partial_u \mathbb{X} \times \partial_v \mathbb{X}}{|| \partial_u \mathbb{X} \times \partial_v \mathbb{X} ||}
\end{align*}
\end{definition}
We will state some basic (and important) results from vector calculus: the divergence theorem, Green's theorem, and Stokes' theorem.
\subsubsection{Divergence}
\begin{theorem}[Divergence Theorem]
Let $F$ be a smooth vector field and let $\Omega$ be a bounded domain with outer normal $n$. Then:
\begin{align}
\iiint_{\Omega} \text{div}F d \Omega = \iint_{\partial \Omega} F \cdot n dS
\end{align}
Where the \dfn{divergence} of a smooth vector field $F$ is given by:
\begin{align*}
\text{div} F := \frac{\partial F_1}{\partial x} + \frac{\partial F_2}{\partial y} + \frac{\partial F_3}{\partial z}
\end{align*}
\end{theorem}
We can write the divergence of a vector field as a dot product with the del operator:
\begin{align*}
\text{div} F = \nabla \cdot F
\end{align*}
\subsubsection{Green's Theorem}
From the divergence theorem, we can deduce Green's theorem. It is given by:
\begin{theorem}[Green's Theorem]
Let $P(x,y)$ and $Q(x,y)$ be smooth functions $\R^2 \rightarrow \R$. Let $\Omega \subseteq \R^2$ be bounded. Then:
\begin{align}
\iint_\Omega \left[ \frac{\partial Q(x,y)}{\partial x} - \frac{\partial P(x,y)}{\partial y} \right] dx dy = \idx{\mathcal{C}}{} P(x,y)dx + Q(x,y)dy
\end{align}
where $\mathcal{C} = \partial \Omega$.
\end{theorem}
There is also a formulation for Green's theorem in terms of the curl of a vector field.
\begin{theorem}[Green's Theorem II]
Let $K$ be a region bounded by a closed, oriented curve $\gamma$. Then, for a smooth vector field $F$ in $K$, we have:
\begin{align}
\idx{\gamma}{} F \cdot d \gamma = \idx{K}{} \text{curl}(F)
\end{align}
\end{theorem}
Finally, we have Stokes' Theorem.
\begin{theorem}
Let $\Omega$ be a smooth, oriented surface bounded by a closed, smooth boundary curve $\partial \Omega$ which is positively oriented. Let $F$ be a smooth vector field. Then:
\begin{align}
\idx{\partial \Omega}{} F \cdot dr = \iint_{\Omega} \text{curl} F \cdot dS
\end{align}
\end{theorem}
\section{Manifolds in $\R^3$}
The aim of this part of the course is to build up to integration on manifolds and the invariant Stokes' theorem. The main purpose of this sections is to develop \emph{coordinate-free} calculus, which clarifies the essence of what is happening (sometimes coordinates can be noisy).
\subsection{Definitions}
\begin{definition}[K-Dimensional Manifold]
A subset $M \subseteq \R^n$ is called a \dfn{k-dimensional manifold} in $\R^n$ if $\forall x \in M$, the following condition is satisfied: $\exists$ an open set $U$ containing $x$ and open set $V \subseteq \R^n$, and a diffeomorphism $h: U \rightarrow V$ such that
\begin{align*}
h (U \cap M) & = V \cap (\R^k \times \{ 0 \} ) \\
& = \{ y \in V\ |\ y^{k+1} = ... = y^n = 0 \}
\end{align*}
In other words, we require that $U \cap M$ is, up to diffeomorphism, $\R^k \times \{ 0 \}$.
\end{definition}
\begin{definition}[$C^\infty$-function] There are two definitions.
\begin{enumerate}[noitemsep]
\item $f: M \rightarrow \R$ is $C^\infty$ if it is $C^\infty$ in each parameterisation.
\item $f: M \rightarrow \R$ is $C^\infty$ if it is locally the restriction of a smooth function of the ambient space: $\forall p \in M$, $\exists V \subseteq \R^n$, $V$ open, $ p \in V$, and $F: V \rightarrow \R$ with $F|_{M \cap V} = f$.
\end{enumerate}
\end{definition}
Before we can do calculus, we need to define vector fields in a \emph{coordinate-free} way on a manifold $M$.
\begin{definition}[Vector Field $v$ on $M$]
A \dfn{vector field} $v$ on $M$ is defined as a function $C^\infty(M) \rightarrow C^\infty(M)$ satisfying three properties:
\begin{enumerate}[noitemsep]
\item $v(f+g) = v(f) + v(g) $ (Linearity I)
\item $v (\alpha f) = \alpha v(f) $ (Linearity II)
\item $v(fg) = fv(g) + gv(f)$ (Leibniz Law; captures the essence of differentiation)
\end{enumerate}
\end{definition}
Using this, we can define a \dfn{derivation} at $x \in \R^n$. First take a vector $v \in \R^n$, and set:
\begin{align}
v(f) := \frac{d}{dt} \left[ f(x+tv) \right]_{t =0}
\end{align}
This is a \dfn{directional derivative} in the direction $v$.
\begin{definition}[Tangent Bundle]
Given a manifold $M^n$, you can package together all the tangent spaces together into a $2n$-dimensional manifold. You'd then obtain a vector bundle called the \dfn{tangent bundle}:
\begin{align*}
T(M) := \bigsqcup_{p \in M} T_p(M)
\end{align*}
\end{definition}
\subsection{Smooth Maps from $M^m \rightarrow N^n$}
Let $M^m$ and $N^n$ be two manifolds. Consider a smooth map $g$ between them. Fix a point $p \in M^m$. The map $g$ induces a map on the tangent spaces. This map, denoted:
\begin{align*}
D_{g_p}(v): T_p(M) \rightarrow T_{g(p)}(N)
\end{align*}
is called the \dfn{differential} or \dfn{push-forward}. Here, $v$ is a derivation at $p \in M$ and $f$ is a function on $N$.
\begin{definition}[Cotangent Space]
The \dfn{cotangent space} is denoted by $T^*_p(M)$. It is the dual space of $T_p(M)$. Functions on $M$ give elements of $T_p^*(M)$ in the following way:
\begin{align*}
df(v) := v(f)
\end{align*}
where $v \in T_p(M)$. $v(f)$ is a derivation of $f$ in the direction $v$.
\end{definition}
\subsection{Change of Coordinates}
\subsection{Multi-Linear Algebra}
\begin{definition}[$k$-linear map]
Let $V^k := V \times \cdots \times V$ ($k$ times). A function $f:V^k \rightarrow \R$ is called \dfn{k-linear} if it is linear in each of its $k$ arguments.
\end{definition}
A $k$-linear function on $V$ is also called a \dfn{k-tensor} on $V$.
\begin{definition}[Symmetric/Alternating]
A $k$-linear function $f: V^k \rightarrow \R$ is \dfn{symmetric} if:
\begin{align*}
f(v_{\sigma(1)}, ..., v_{\sigma(k)}) = f(v_1, ..., v_k)
\end{align*}
for all permutations $\sigma \in S_k$ (symmetric group on $k$ letters); it is said to be \dfn{alternating} if
\begin{align*}
f(v_{\sigma(1)}, ..., v_{\sigma(k)} ) = (\text{sgn}(\sigma)) f(v_1, ..., v_k)
\end{align*}
\end{definition}
Examples of symmetric functions:
\begin{itemize}[noitemsep]
\item the dot product, $f(v,w) := v \cdot w$ on $\R^n$.
\end{itemize}
Examples of alternating functions:
\begin{itemize}[noitemsep]
\item $f(v_1, ..., v_n) := \det [v_1, ..., v_n]$
\item Cross product $v \times w$ on $\R^3$.
\item Generalisation of a cross product: let $f, g: V \rightarrow \R$ on a vector space $V$. Define $f \wedge g: V \times V \rightarrow \R$ by:
\begin{align*}
( f \wedge g ) (u,v) := f(u) g(v) - f(v) g(u)
\end{align*}
(special case of the wedge product).
\end{itemize}
The space of all alternating $k$-linear functions on a vector space $V$ is denoted by $A_k(V)$. When $k=0$, a $0$-covector is a constant $\Rightarrow$ $A_0(V)$ is the vector space $\R$. A 1-covector is a covector.
\begin{definition}[Tensor Product]
Let $f$ be a $k$-linear function and $g$ an $l$-linear function on a vector space $V$. The \dfn{tensor product} is a $(k+l)-$linear function $f \otimes g$ defined as:
\begin{align}
(f \otimes g)(v_1, ..., v_{k+l}) := f(v_1, ..., v_k) g(v_{k+1}, ..., v_{k+l})
\end{align}
\end{definition}
In order to motivate the next definition, assume that we have two multilinear functions $f$, $g$ on a vector space $V$. We would like to have a product that is alternating. This is why we define the wedge product:
\begin{definition}[Wedge Product]
Let $f \in A_k(V)$ and $g \in A_l(V)$. Then, the \dfn{wedge product} or \dfn{exterior product} is defined as:
\begin{align*}
f \wedge g := \frac{1}{k!l!} A(f \otimes g)
\end{align*}
This can be written out explicitly as:
\begin{align*}
(f \wedge g)(v_1, ..., v_{(k+l)}) = \frac{1}{k!l!} \sum_{ \sigma \in S_{k+l}} f(v_{\sigma(1)}, ..., v_{\sigma(k)} ) g(v_{\sigma(k+1)}, ..., v_{\sigma(k+l)})
\end{align*}
\end{definition}
Remarks:
\begin{itemize}[noitemsep]
\item When $k=0$, this corresponds to scalar multiplication.
\item The coefficient $1/l!k!$ compensates for repetitions in the sum.
\end{itemize}
\begin{prop}
The wedge product is anti-commutative: if $f \in A_k(V)$ and $g \in A_l (V)$, then:
\begin{align*}
f \wedge g = (-1)^{kl} g \wedge f
\end{align*}
\end{prop}
\subsection{Differential Forms in $M^n$}
Differential $k$-forms assign $k$-covectors on the tangent space to each point of an open set $\Omega$. There is a notion of differentiation for differential forms -- the exterior derivative. This is something that turns out to be intrinsic to the manifold.
\begin{definition}[Differential One Form]
A \dfn{covector field} or \dfn{differential 1-form} on an open subset $\Omega \subseteq \R^n$ is a function $\omega$ that assigns to each point $p \in \Omega$ a covector $\omega_p \in T_p^*(\R^n)$.
\end{definition}
Given a $C^\infty$ function $f: \Omega \rightarrow \R$, we can construct the one-form called the \dfn{differential of $f$}, denoted $df$ as follows: let $p \in \Omega$ and let $X_p \in T_p(\Omega)$. Then, define:
\begin{align*}
(df)_p (X_p) := X_p f
\end{align*}
\begin{prop}
Let $x^1, ..., x^n$ be the standard coordinates on $\R^n$. Then, at each point $p \in \R^n$, $\{ (dx^1)_p, ..., (dx^n)_p \}$ is the basis of the cotangent space $T_p^*(\R^n)$ dual to the basis $\{ [\partial/\partial x^1]_p, ..., [\partial / \partial x^n]_p \}$ for the tangent space $T_p(\R^n)$.
\end{prop}
\begin{prop}[Differential in terms of coordinates]
If $f: \Omega \rightarrow \R^n$ is $C^\infty$ on $\Omega \subseteq \R^n$ open, then:
\begin{align*}
df = \sum \frac{\partial f}{\partial x^i} dx^i
\end{align*}
\end{prop}
\begin{definition}[Differential form of degree $k$]
A \dfn{differential k-form} on $\Omega \subseteq \R^n$ is a function that assigns to each point $p \in \Omega$ an alternating $k$-linear function on the tangent space $T_p(\R^n)$; i.e., $\omega_p \in A_k(T_p(\R^n))$.
\end{definition}
\begin{itemize}[noitemsep]
\item Basis for $A_k(T_p (\R^n))$:
\begin{align*}
dx_p^I = dx_p^{i_1} \wedge \cdots \wedge dx_p^{i_k},\ 1 \leq i_1 < \cdots < i_k \leq n
\end{align*}
\item For each point $p \in \Omega$, $\omega_p$ can be expressed as a linear combination:
\begin{align*}
\omega_p = \sum a_I(p) dx_p^I,\ 1 \leq i_1 < \cdots < i_k \leq n
\end{align*}
\item General $k$-form on $\Omega$:
\begin{align*}
\omega = \sum a_I dx^I
\end{align*}
\item $\Omega^k(U)$ is the vector space of $C^\infty$ $k$-forms on $U$.
\begin{itemize}[noitemsep]
\item $0$-form on $U$ is a smooth function on $U$.
\end{itemize}
\end{itemize}
The wedge product of two $k$-forms:
\begin{align*}
\omega \wedge \tau := \sum_{I,J \text{ disjoint}} (a_I b_J) dx^I \wedge dx^J
\end{align*}
To make this concrete: let $x,y,z$ be the coordinates on $\R^3$. Then:
\begin{itemize}[noitemsep]
\item $C^\infty$ 1-forms are:
\begin{align*}
fdx + gdy + hdz
\end{align*}
where $f,g,h$ range over all smooth functions on $\R^3$.
\item $C^\infty$ 2-forms are:
\begin{align*}
f dy \wedge dz + g dx \wedge dz + h dx \wedge dy
\end{align*}
\item $C^\infty$ 3-forms are:
\begin{align*}
f dx \wedge dy \wedge dz
\end{align*}
\end{itemize}
Here are some worked examples of taking the wedge products between differential forms.
\begin{ex}
Consider the 2-form $dx \wedge dy$. Express this in polar coordinates.
\newline
\textbf{Solution:} We have: $x = r \cos \theta$ and $y = r \sin \theta$. By the total derivative rule we have:
\begin{align*}
& dx = \frac{\partial x}{\partial r} dr + \frac{\partial x}{\partial \theta} d \theta \\
& dy = \frac{\partial y}{\partial r} dr + \frac{\partial y}{\partial \theta} d \theta
\end{align*}
and so:
\begin{align*}
& dx = \cos \theta dr - r \sin \theta d \theta \\
& dy = \sin \theta d r + r \cos \theta d \theta
\end{align*}
and so from the properties of wedge products:
\begin{align*}
dx \wedge dy & = \cos \theta r \cos \theta dr \wedge d \theta - r \sin \theta \sin \theta d \theta \wedge dr \\
& = r \cos^2 \theta dr \wedge d \theta - r \sin ^2 \theta d \theta \wedge d r \\
& = r \cos^2 \theta dr \wedge d \theta + r \sin ^2 \theta d r \wedge d \theta \\
& = r ( \cos^2 \theta + \sin ^2 \theta) dr \wedge d \theta \\
& = r dr \wedge d \theta
\end{align*}
Which is what we would expect from standard cal 2.
\end{ex}
In general, if we have a system of equations:
\begin{align*}
& y_1 = a_{11} x_1 + a_{12} x_2 \\
& y_2 = a_{21} x_1 + a_{22} x_2
\end{align*}
and we collect the coefficients $a_{ij}$ into a matrix:
\begin{align*}
A := \begin{bmatrix}
a_{11} & a_{12} \\
a_{21} & a_{22}
\end{bmatrix}
\end{align*}
then we have:
\begin{align*}
d y_1 \wedge d y_2 = \det (A) dx_1 \wedge d x_2
\end{align*}
Which is also not very surprising.
\begin{ex}
Let $f: \R^2 \rightarrow \R^2$, $(x,y) \mapsto (u,v)$ according to:
\begin{align*}
u & = x^2 - y^2 \\
v & = 2xy
\end{align*}
Express $du \wedge dv$ in terms of $dx \wedge dy$.
\newline
\textbf{Solution:} By the total derivative rule:
\begin{align*}
& du = 2x dx - 2y dy \\
& dv = 2x dy + 2y dx
\end{align*}
and so, by the properties:
\begin{align*}
du \wedge dv & = (2x dx - 2ydy ) \wedge (2x dy + 2y dx) \\
& = 2x dx \wedge (2x dy + 2y dx) - 2y dy \wedge (2x dy + 2ydx) \\
& = 4x^2 dx \wedge dy - 4y^2 dy \wedge dx \\
& = 4x^2 dx \wedge dy + 4y^2 dx \wedge dy \\
& = 4(x^2 + y^2) dx \wedge dy
\end{align*}
Note that the quantity $ 4(x^2 + y^2) dx \wedge dy $ depends on how $f$ is defined, so the proper way to refer to this quantity is to say that $ 4(x^2 + y^2) dx \wedge dy $ is the \dfn{pull back} of $du \wedge dv$ via $f$. Mathematically, we would write:
\begin{align*}
f^*(du \wedge dv) = 4(x^2 + y^2) dx \wedge dy
\end{align*}
\end{ex}
This example motivates the following rules for pull backs and wedge products.
\begin{prop}
Let $g$ be a function and let $\alpha$, $\omega$, and $\beta$ be differential forms. Then:
\begin{enumerate}[noitemsep]
\item $g^*(\alpha \wedge \beta) = g^* \alpha \wedge g^* \beta$
\item $g^*(f \omega) = (g^* f) (g^* \omega) $
\end{enumerate}
\end{prop}
\begin{definition}[Exterior Derivative]
We will define the exterior derivative in two steps: first for $0$-forms; then, we will generalise to $k$-forms. The \dfn{exterior derivative} of a smooth function $f$ is the differential $df \in \Omega^1 (U)$. With coordinates:
\begin{align*}
df := \sum \frac{\partial f}{\partial x^i} dx^i
\end{align*}
Now let $k \geq 1$. Set $\omega = \sum_{I} a_I dx^I \in \Omega^k(U)$. Then the \dfn{exterior derivative} is defined as:
\begin{align*}
d \omega & := \sum_{I} da_I \wedge dx^I \\
& = \sum_{I} \left( \sum_{J} \frac{\partial a_I}{\partial x^j } dx^j \right) \wedge dx^I \in \Omega^{k+1}(U)
\end{align*}
\end{definition}
To make this clearer, let's do an example. Let $\omega$ be the 1-form $f dx + g dy$ on $\R^2$. Then:
\begin{align*}
d \omega & = df \wedge dx + dg \wedge dy \\
& = (f_x dx + f_y dy ) \wedge dx + (g_x dx + g_y dy ) \wedge dy \text{ (by definition)} \\
& = (g_x - f_y) dx \wedge dy \text{ (by properties of wedge product) }
\end{align*}
Here are two useful properties of the exterior derivative:
\begin{prop}[Properties of the Exterior Derivative]
Let $\alpha \in \Lambda^k(M)$, $\beta \in \Lambda^l(M)$. Let $a, b \in \R$. Then:
\begin{enumerate}[noitemsep]
\item $d(a \alpha + \beta b) = a d \alpha + b d \beta$ (Linearity)
\item $d(\alpha \wedge \beta) = ( d \alpha) \wedge \beta + (-1)^k \alpha \wedge d \beta$ (Product rule)
\item $d(d \alpha) = 0$
\end{enumerate}
\end{prop}
Here are some concrete examples of computing exterior derivatives.
\begin{ex}
Let $\omega = ydx - z dy$. Compute the exterior derivative $d \omega$. \textbf{Solution:}
\begin{align*}
d \omega = dy \wedge dx - dz \wedge dy
\end{align*}
\end{ex}
\begin{ex}
Let $\omega = (x^2+y^2+z^2)(dx \wedge dy + dy \wedge dz)$. Compute the exterior derivative $d \omega$:
\begin{align*}
d \omega & = (2x dx + 2y dy + 2z dz) \wedge (dx \wedge dy + dy \wedge dz) \\
& = 2x dx \wedge dy \wedge dz + 2z dz \wedge dx \wedge dy \\
& = (2x + 2z) (dx \wedge dy \wedge dz)
\end{align*}
\end{ex}
\begin{ex}
Let $\omega = \frac{xdy - ydx}{x^2 + y^2}$ be the angular form. Find the exterior derivative $d \omega$.
\newline
\textbf{Solution:} Re-write the form as:
\begin{align*}
(x^2 + y^2) \omega = xdy - ydx
\end{align*}
Now take the exterior derivative of both sides:
\begin{align*}
d ( (x^2 + y^2) \omega ) = d (xdy - ydx)
\end{align*}
Let's first simplify $d ( (x^2 + y^2) \omega )$:
\begin{align*}
d ( (x^2 + y^2) \omega ) & = d(x^2 + y^2) \wedge \omega + (x^2 + y^2) d \omega \text{ (by the product rule) } \\
& = (2x dx + 2y dy ) \wedge w + (x^2 + y^2) d \omega \\
& = (2x dx + 2y dy ) \wedge \frac{x dy - y dx}{x^2 + y^2} + (x^2 + y^2) d \omega
\end{align*}
Now expand out $(2x dx + 2y dy ) \wedge \frac{x dy - y dx}{x^2 + y^2}$:
\begin{align*}
(2x dx + 2y dy ) \wedge \frac{x dy - y dx}{x^2 + y^2} & = 2x dx \wedge \left( \frac{xdy - ydx}{x^2 + y^2} \right) + 2y dy \wedge \left( \frac{x dy - y dx}{x^2 + y^2} \right) \\
& = \frac{1}{(x^2+y^2)} \left[ 2x^2 dx \wedge dy - 2xy dx \wedge dx + 2yx dy \wedge dy - 2y^2 dy \wedge dx \right ] \\
& = \frac{1}{(x^2+y^2)} \left[ (2x^2 + 2y^2) dx \wedge dy \right] \\
& = 2 (dx \wedge dy)
\end{align*}
And so we get:
\begin{align*}
d( (x^2 + y^2) \omega ) = 2 dx \wedge dy + (x^2 + y^2) d \omega
\end{align*}
Now we compute the exterior derivative $d(xdy - ydx)$:
\begin{align*}
d(xdy - ydx) = dx \wedge dy - dy \wedge dx = 2dx \wedge dy
\end{align*}
And so:
\begin{align*}
(x^2 + y^2) d \omega = 0 \iff d \omega = 0
\end{align*}
Since we are in the punctured disc and so $x^2 + y^2 > 0$.
\end{ex}
There is a connection between the exterior derivative and the curl operation from advanced calculus. Precisely: let $\alpha$ be a general one-form of three variables be written as:
\begin{align*}
\alpha = Pdx + Q dy + R dz
\end{align*}
Then, taking the exterior derivative $d \alpha$ we recover the components of the curl of the vector field $F := (P, Q, R)$:
\begin{align*}
d \alpha & = d P \wedge dx + dQ \wedge dy + d R \wedge dz \\
& = (R_y - Q_z) dy \wedge dz + (P_z - R_x) dz \wedge dx + (Q_x - P_y) dx \wedge dy
\end{align*}
whose coefficients are exactly the components of $\nabla \times F$.
\begin{definition}[Closed and Exact Forms]
Let $\omega$ be a $k$-form on $U$. We say that $\omega$ is \dfn{closed} if $d\omega =0$. We say that $\omega$ is \dfn{exact} if $\exists$ a $(k-1)$-form $\tau$ such that $\omega = d \tau$. Every exact form is closed.
\end{definition}
\subsection{Change of Variables for Integrals in $\R^n$}
\subsection{Integrating a $n$-Form on $M^n$ ($\idx{M}{} \omega$)}
In this section, we will build up to the invariant Stokes' theorem. We will first start with line integrals, and how they can be written in terms of forms.
\subsubsection{Line Integrals}
The objective is to compute the following object:
\begin{align}
\idx{\gamma}{} \omega
\end{align}
where $\omega$ is a one-form and $\gamma$ is a path or curve. The general setup is as follows:
\begin{enumerate}[noitemsep]
\item Suppose that the variables in the differential one-form are $x_1, ..., x_n$. We will collect these into a vector $x:= (x_1, ..., x_n)$ and write the one-form $\omega$ as:
\begin{align*}
\omega = \sum_{k=1}^n F_k dx_k
\end{align*}
where $F_k$ is:
\begin{align*}
F_k = F_k(x) = F_k(x_1, ..., x_n)
\end{align*}
\item There are two ways to describe $\gamma$:
\begin{enumerate}[noitemsep]
\item A system of parametric equations: $x_k := x_k(t)$
\item In vector form: $x=x(t)$ where $t \in [a,b]$.
\end{enumerate}
\end{enumerate}
When $\gamma$ is just a standard path in $[a,b]$ (i.e., one that corresponds to standard Cal 2 integration), then we just have the standard definite integral when taking the \dfn{pull back} of $\omega$:
\begin{align*}
\idx{\gamma}{} \omega = \idx{a}{b} F(t) dt
\end{align*}
You can think of the pull back as ``substituting'' $t$ into $F$. For the more general case, we \dfn{pull back} a differential form $\omega$ in $n$ variables $x_j$'s via $\gamma$ to get a differential form on \emph{one} variable $t$. This is denoted by $\gamma^* \omega$. You obtain it by the substitution:
\begin{align*}
x_j = x_j(t)
\end{align*}
into $\omega$. So:
\begin{align*}
\omega = \sum_{k=1}^n F_k dx_k \text{ --PULL BACK: } \rightarrow \gamma^*(\omega) = \sum_{k=1}^n F_k(x(t)) dx_k (t) = \sum_{k=1}^n F_k(x(t))x'_k(t) dt
\end{align*}
So, we can formally define a line integral in the general case.
\begin{definition}[Line Integral -- Differential Forms]
Let $\omega$ be a one-form given by $\omega = \sum_{k=1}^n F_k(x) dx_k$ and let $\gamma$ be a curve. Then, the \dfn{line integral} is defined as:
\begin{align}
\idx{\gamma}{}\omega := \idx{a}{b} \gamma^* \omega
\end{align}
where $\gamma^* \omega = \sum_{k=1}^n F_k(x(t)) \frac{dx_k}{dt} dt$.
\end{definition}
I find that all of this stuff is super confusing without clear examples, so here are some worked examples of line integrals of one-forms:
\begin{ex}
Compute the line integral:
\begin{align*}
\idx{\gamma}{} xdy + y dz + zdx
\end{align*}
For the following three paths connecting the point $(0,0,0)$ to $(1,1,1)$:
\begin{enumerate}[noitemsep]
\item $\gamma = \alpha$: $(x,y,z) = (t,t,t)$ where $t \in [0,1]$.
\item $\gamma = \beta$: $(x,y,z) = (t,t^2, t^3)$ where $t \in [0,1]$.
\item $\gamma = \zeta$: $(x,y,z) = (t^2, t^4, t^6)$ where $t \in [0,1]$.
\end{enumerate}
Computing the pullbacks gives us:
\begin{enumerate}[noitemsep]
\item $\alpha^* \omega = tdt + tdt + tdt = 3tdt$
\item $\beta^* \omega = td(t^2) + t^2 d(t^3) + t^3 dt = (2t^2 + 3t^4 + t^3) dt $
\item $\zeta^* \omega = (4t^5 + 6t^9 + 2t^7)dt$.
\end{enumerate}
Carrying out the integration:
\begin{align*}
& \idx{\alpha}{} \omega = \idx{0}{1} 3tdt = 3/2 \\
& \idx{\beta}{} \omega = \idx{0}{1} (2t^2 + 3t^4 + t^3)dt = 91/60 \\
& \idx{\zeta}{} \omega = \idx{0}{1} (4t^5 + 6t^9 + 2t^7) dt = 91/60
\end{align*}
\end{ex}
\begin{ex}
Compute the line integral:
\begin{align*}
\idx{\gamma}{} \omega := \idx{\gamma}{} \frac{xdy - ydx}{x^2 + y^2}
\end{align*}
where $\gamma$ is the path around the unit circle once in the anti-clockwise direction parameterised by $x = \cos t$ and $ y = \sin t$, $t \in [0, 2 \pi ]$.
\newline
\newline
\textbf{Solution:} Set:
\begin{align*}
\omega := \frac{xdy - ydx}{x^2 + y^2}
\end{align*}
Compute the pullback:
\begin{align*}
\gamma^* \omega & = \frac{x(t) dy(t) - y(t) dx(t)}{(x(t))^2 + (y(t))^2} \\
& = \frac{\cos (t) d ( \sin(t)) - \sin (t) d ( \cos(t)) }{( \cos(t))^2 + ( \sin(t))^2} \\
& = \frac{\cos^2(t) + \sin^2(t)}{\cos^2(t) + \sin^2(t)} \\
& = 1
\end{align*}
and so the integral becomes:
\begin{align*}
\idx{\gamma}{} \omega = \idx{0}{2 \pi} dt = 2 \pi
\end{align*}
\end{ex}
\subsubsection{Surface Integrals}
Now the objective is to compute the following surface integral:
\begin{align*}
\iint_\sigma \omega
\end{align*}
of a two-form $\omega$ over a parameterised surface $\sigma \subseteq \R^3$.
\begin{definition}[Surface Integral -- Differential Forms]
Let $\omega$ be a two form. Let $\sigma$ be parameterised as:
\begin{align*}
x = x(u,v) = (x_1(u,v), x_2(u,v), x_3(u,v))
\end{align*}
where $(u,v)$ runs through the rectangle $[a,b] \times [c,d]$. Then, the \dfn{surface integral} is defined as:
\begin{align}
\iint_\sigma \omega = \iint_R \sigma^* \omega = \iint_R f(u,v) du dv = \idx{a}{b} du \idx{c}{d} f(u,v) dv
\end{align}
\end{definition}
This is best explained through an example.
\begin{ex}
Let $\omega := x dy \wedge dz + y dz \wedge dx + z dx \wedge dy$ be the two-form. Suppose we want to integrate this over the parameterised surface $\sigma: R \rightarrow \R^3$, $R:= [0, 2 \pi] \times [- \pi /2, \pi /2 ]$ given by:
\begin{align*}
\sigma (\theta, \varphi) = ( \cos \theta \cos \varphi, \sin \theta \cos \varphi, \sin \varphi )
\end{align*}
Compute the surface integral $\iint_\sigma \omega$.
\newline
\textbf{Solution:} By definition, we have $\iint_\sigma \omega = \iint_R \sigma^* \omega$, so we first need to compute the pull back of $\omega$ under $\sigma$. Analogously to the line integral case, we have:
\begin{align*}
\sigma^* \omega = x_1(\theta, \varphi) \sigma^* (dy \wedge dz ) + x_2( \theta, \varphi) \sigma^*(dz \wedge dx) + x_3(\theta, \varphi) \sigma^*(dx \wedge dy)
\end{align*}
By the properties of the pull-back and wedge products, we can re-write this as:
\begin{align*}
\sigma^* \omega = x_1(\theta, \varphi) \sigma^* dy \wedge \sigma^* dz + x_2( \theta, \varphi) \sigma^*dz \wedge \sigma^* dx + x_3(\theta, \varphi) \sigma^* dx \wedge \sigma^* dy
\end{align*}
Applying the properties once more:
\begin{align*}
\sigma^* dx & = d \sigma^* x = d( \cos \theta \cos \varphi) = - \sin \theta \cos \varphi d \theta - \cos \theta \sin \varphi d \varphi \\
\sigma^* d y & = d \sigma^* y = d( \sin \theta \cos \varphi ) = \cos \theta \cos \varphi d \theta - \sin \theta \sin \varphi d \varphi \\
\sigma^* d z & = d \sigma^* z = d( \sin \varphi ) = \cos \varphi d \varphi
\end{align*}
and so the wedge products are:
\begin{align*}
\color{red} \sigma^* dy \wedge \sigma^* dz \color{black} & = ( \cos \theta \cos \varphi d \theta - \sin \theta \sin \varphi d \varphi ) \wedge \cos \varphi d \varphi \\
& = \cos \theta \cos \varphi \cos \varphi d \theta \wedge d \varphi \\
& = \color{red} \cos \theta \cos^2 \varphi d \theta \wedge d \varphi \\
\color{red} \sigma^* dz \wedge \sigma^* dx \color{black} & = \cos \varphi d \varphi \wedge ( - \sin \theta \cos \varphi d \theta - \cos \theta \sin \varphi d \varphi ) \\
& = - \cos^2 \varphi \sin \theta d \varphi \wedge d \theta \\
& = \color{red} \cos^2 \varphi \sin \theta d \theta \wedge d \varphi \\
\color{red} \sigma^* dx \wedge \sigma^* dy \color{black} & = (- \sin \theta \cos \varphi d \theta - \cos \theta \sin \varphi d \varphi ) \wedge ( \cos \theta \cos \varphi d \theta - \sin \theta \sin \varphi d \varphi) \\
& = ( - \sin \theta \cos \varphi d \theta ) \wedge ( \cos \theta \cos \varphi d \theta - \sin \theta \sin \varphi d \varphi) - ( \cos \theta \sin \varphi d \varphi) \wedge ( \cos \theta \cos \varphi d \theta - \sin \theta \sin \varphi d \varphi) \\
& = \color{red} \cos \varphi \sin \varphi d \theta \wedge d \varphi
\end{align*}
Substitute these values into
\begin{align}
\sigma^* \omega = \color{blue} x_1(\theta, \varphi) \color{red} \sigma^* dy \wedge \sigma^* dz \color{black} + \color{blue} x_2( \theta, \varphi) \color{red} \sigma^*dz \wedge \sigma^* dx \color{black} + \color{blue} x_3(\theta, \varphi) \color{red} \sigma^* dx \wedge \sigma^* dy
\end{align}
and we obtain:
\begin{align*}
\sigma^* \omega & = \color{blue} \cos \theta \cos \varphi \color{red} d y \wedge dz \color{black} + \color{blue} \sin \theta \cos \varphi \color{red} dz \wedge dx \color{black} + \color{blue} \sin \varphi \color{red} dx \wedge dy \color{black} \\
& = \color{blue} \cos \theta \cos \varphi \color{red} \cos \theta \cos^2 \varphi d \theta \wedge d \varphi \color{black} + \color{blue} \sin \theta \cos \varphi \color{red} \cos^2 \varphi \sin \theta d \theta \wedge d \varphi \color{black} + \color{blue} \sin \varphi \color{red} \cos \varphi \sin \varphi d \theta \wedge d \varphi
\end{align*}
After re-grouping and simplifying, we obtain:
\begin{align*}
\sigma^* \omega = \cos \varphi d \theta \wedge d \varphi
\end{align*}
And so the surface integral becomes:
\begin{align*}
\iint_\sigma \omega & = \iint_R \cos \varphi d \theta \wedge d \varphi \\
& = \idx{0}{2 \pi} d \theta \idx{-\pi /2}{\pi /2} \cos \varphi d \varphi \\
& = \idx{0}{2 \pi} [ \sin \varphi ]_{\varphi = -\pi / 2}^{\varphi = \pi / 2} d \theta \\
& = \idx{0}{2 \pi} 2 d \theta \\
& = 4 \pi
\end{align*}
\end{ex}
In order to properly get to the Generalised Stokes' theorem, we need some notation / review from Ad Cal: Let $\hat{i} = (1,0,0)$, $\hat{j} = (0,1,0)$ and $\hat{k} = (0,0,1)$ denote the standard basis vectors in $\R^3$, and let the following be the radial vector:
\begin{align*}
r := (x,y,z) = x \hat{i} + y \hat{j} + z \hat{k}
\end{align*}
Then, the differential $dr$ is given by:
\begin{align}
dr = (dx, dy , dz) = dx \hat{i} + dy \hat{j} + dz \hat{k}
\end{align}
Now let $F := (P,Q,R)$ be a vector field. This justifies the following expression that we had for line integrals:
\begin{align*}
\idx{\gamma}{} F \cdot dr = \idx{\gamma}{} P dx + Q dy + R dz = \idx{\gamma}{} \omega
\end{align*}
We have the following identity for the ``surface area'' element of a surface integral, $dS$:
\begin{align}
dS = \frac{1}{2} (dr \times dr)
\end{align}
We will use this identity to compute the pull-back of a parameterisation. Let $\sigma$ be a bounded parametric surface. Then, we have the following identity:
\begin{align}
\sigma^* dS = (r_u \times r_v) du \wedge dv
\end{align}
Which gives us the following definition of the surface integral in terms of differential forms:
\begin{align}
I = \iint_\sigma F \cdot dS = \iint_D \sigma^* \alpha_F
\end{align}
where $\alpha_F = F \cdot dS$. This is best explained with an example:
\begin{ex}
Let $\omega := x dy dz + y dz dx + z dx dy$. In terms of vector fields, this can be written as $F \cdot dS$, where $F = (P, Q, R) = (x,y,z)$. Parameterise the sphere as:
\begin{align*}
& x = \cos \theta \cos \varphi \\
& y = \sin \theta \cos \varphi \\
& z = \sin \varphi
\end{align*}
Then, $\sigma^* (F \cdot dS) = F \cdot (r_u \times r_v) du dv$, which is equal to:
\begin{align}
\det \begin{bmatrix}
P & Q & R \\
x_u & y_u & z_u \\
x_v & y_v & z_v
\end{bmatrix}
\end{align}
So, carrying out this calculation gives:
\begin{align*}
\sigma^*(F \cdot dS) & = \det \begin{bmatrix}
\cos \theta \cos \varphi & \sin \theta \cos \varphi & \sin \varphi \\
- \sin \theta \cos \varphi & \cos \theta \cos \varphi & 0 \\
- \cos \theta \sin \varphi & - \sin \theta \sin \varphi & \cos \varphi
\end{bmatrix} \\
& = \cos \varphi d \theta \wedge d \varphi
\end{align*}
\end{ex}
We can write the surface element, in general, as:
\begin{align}
dS := \sqrt{(dy \wedge dz)^2 + (dz \wedge dx)^2 + (dx \wedge dy)^2}
\end{align}
and the area of a parameterised region $\sigma$ is given by:
\begin{align}
\iint_\sigma dS := \iint_D \sigma^* dS
\end{align}
where $D$ is the region of parameterisation.
\subsubsection{Generalised Stokes' Theorem}
Green's Theorem and the classical Stokes' theorem are really the same theorem for 1-forms, just in different dimensions ($\R^2$ vs $\R^3$). The theorem is given by:
\begin{theorem}
Let $M$ be an oriented $n$-dimensional manifold with boundary $\partial M$, where $\partial M$ is $(n-1)$-dimensional. Let $\omega$ be an $(n-1)$-form defined on $M$. Then we have:
\begin{align}\label{stokes}
\idx{M}{} d \omega = \idx{\partial M}{} \omega
\end{align}
\end{theorem}
The cases of $n=2$ and $n=3$ correspond to Green's theorem and the classical Stokes' theorem, respectively:
When $n=2$, a general one-form can be written as $\omega = Pdx + Qdy$. Then, (\ref{stokes}) becomes:
\begin{align}
\iint_S \left[ \frac{\partial{Q}}{\partial x } - \frac{\partial P}{\partial y} \right] dx \wedge dy = \idx{\partial S}{} P dx + Q dy
\end{align}
Observe that this is Green's theorem. When $n=3$, then a general one-form can be written as $\omega = P dx + Q dy + R dz$. Then, (\ref{stokes}) becomes:
\begin{align}
\iint_S \text{curl}(F) \cdot dS = \idx{\partial S}{} F \cdot dr
\end{align}
Observe that this is the classical Stokes' theorem. Applications of Stokes' theorem are best explained by examples.
\begin{ex}
Verify that the area of a planar region surrounded by a loop is given by $\frac{1}{2} \idx{\gamma}{} x dy - y dx$. Use this to find the area $A_e$ of the region surrounded by the ellipse $(x^2/a^2) + (y^2 / b^2) =1$, where $a, b \in ]0, \infty[$.
\newline
\newline
\textbf{Solution:} Recall that the area of a region $D \subseteq \R^2$ is given by:
\begin{align}
A(D) = \iint_D dx \wedge dy
\end{align}
We have:
\begin{align*}
\frac{1}{2} \idx{\gamma}{} x dy - y dx = \frac{1}{2} \idx{\partial S}{} \omega
\end{align*}
So, set $\omega := x dy - y dx$. Then, the exterior derivative becomes:
\begin{align*}
d(xdy - ydx) & = d(xdy) - d(ydx) \\
& = dx \wedge dy - dy \wedge dx \\
& = 2 dx \wedge dy
\end{align*}
And so, by (\ref{stokes}) (Stokes'), we have:
\begin{align}
\frac{1}{2} \idx{\gamma}{} x dy - y dx = \frac{1}{2} \iint_D d \omega = \frac{1}{2} \iint_D 2 dx \wedge dy = \iint_D dx \wedge dy
\end{align}
which verifies the first statement. We can now use this to compute the area of the ellipse. The parametrisation is $x = a \cos (t)$, $y = b \sin (t)$, $t \in [0, 2 \pi ]$. Plugging this into the formula verified above, we obtain:
\begin{align*}
A_e & = \frac{1}{2} \idx{\gamma}{} x dy - y dx \\
& = \frac{1}{2} \idx{0}{2 \pi} a \cos ( t ) d (b \sin (t) ) - b \sin (t) d ( a \cos (t) ) \\
& = \frac{1}{2} \idx{0}{2 \pi} a \cos (t) b \cos(t) + b \sin (t) a \sin (t) \\
& = \frac{1}{2} \idx{0}{ 2 \pi} ab (\cos^2 (t) + \sin ^2 (t) ) \\
& = \frac{1}{2} \idx{0}{ 2 \pi} ab \\
& = ab \pi
\end{align*}
\end{ex}
\begin{ex}
Find the line integral $\idx{\gamma}{} \omega$, where $\omega = xy dy + ydz$, and $\gamma$ is a path running along the boundary of the parallelogram, starting from its vertex $A = (1,1,0)$, passing vertices $B = (2,3,1)$, $C = (2,5,2)$, $D = (1,3,1)$, and back to $A$.
\newline
\newline
\textbf{Solution:} We will apply Stokes' theorem. We can parameterise this by:
\begin{align*}
\sigma(u,v) & = OA + u AB + v AD \\
& = (1,1,0) + u(1,2,1) + v(0,2,1) \\
& = (1+u, 1+2u + 2v, u+v)
\end{align*}
where $u,v \in [0,1]$. This defines a parameterisation. By construction, $\gamma = \partial P$. By Stokes' theorem:
\begin{align*}
\idx{\gamma}{} \omega = \idx{\partial P}{} \omega = \iint_P d \omega
\end{align*}
Set $\omega = xy dy + y dz$. Then:
\begin{align*}
d \omega & = d (xy dy ) + d(ydz) \\
& = d(xy) \wedge dy + dy \wedge dz \\
& = (ydx + xdy ) \wedge dy + dy \wedge dz \\
& = y dx \wedge dy + x dy \wedge dy + dy \wedge dz \\
& = y dx \wedge dy + dy \wedge dz
\end{align*}
Now, by the definition of a surface integral:
\begin{align*}
\iint_P d \omega = \iint_{[0, 1] \times [0,1]} \sigma^* d \omega
\end{align*}
\begin{align*}
\sigma^* d \omega & = (1+2u+2v) d (1+u) \wedge d(1+2u + 2v) + d(1 + 2u + 2v) \wedge d(u+v )
\end{align*}
The constants in the $d( \cdot )$ drop out:
\begin{align*}
\sigma^* d \omega & = (1+2u+2v) du \wedge d(2u + 2v) + d (2u + 2v) \wedge d(u+v) \\
& = (1+2u+2v) du \wedge (d(2u) + d(2v)) + (d(2u) + d(2v)) \wedge d(u+v) \\
& = 2 (1+2u+2v) du \wedge dv + (2du + 2dv) \wedge du + (2 du + 2 dv) \wedge dv \\
& = 2 (1+2u+2v) du \wedge dv + 2 dv \wedge du + 2 du \wedge dv \\
& = (2+4u + 4v) du \wedge dv
\end{align*}
Plugging this into the integral gives:
\begin{align*}
\iint_P d \omega = \idx{0}{1} \idx{0}{1} [2 + 4u + 4v] du dv = 6
\end{align*}
\end{ex}
\section{Curves}
There are two subsets of differential geometry: classical differential geometry and global differential geometry. The objective of \dfn{classical differential geometry} is to study the local properties of curves and surfaces. The objective of \dfn{global differential geometry} is to study the influence of local properties on global behaviour.
\subsection{Definitions}
\begin{definition}[Parameterised Differentiable Curve]
A \dfn{parameterised differentiable curve} is a differentiable map $ \alpha: I \rightarrow \R^3$ of an open interval $I = ]a,b[$ of the real line $\R$ into $\R^3$. The image of $\alpha$ is called the \dfn{trace} of $\alpha$.
\end{definition}
Some examples of parameterised curves include:
\begin{itemize}[noitemsep]
\item The helix: $\alpha(t) = (a \cos (t), a \sin(t), bt)$ for $t \in \R$.
\item Any differentiable map $\alpha: \R \rightarrow \R^2$, defined for $t \in \R$, is a parameterised differentiable curve.
\end{itemize}
\begin{definition}[Norm on $\R^3$]
Let $u = (u_1, u_2, u_3) \in \R^3$. The \dfn{norm} of $u$ is:
\begin{align*}
|| u || := \sqrt{ u_1^2 + u_2^2 + u_3^2}
\end{align*}
\end{definition}
\begin{definition}[Inner Product on $\R^3$]
Let $u = (u_1, u_2, u_3)$ and $v= (v_1, v_2, v_3)$ belong to $\R^3$ and let $\theta \in [0, \pi]$ be the angle formed between $u,v$. The \dfn{inner product} is defined by:
\begin{align}
u \cdot v := || u || ||v|| \cos (\theta)
\end{align}
\end{definition}
It satisfies the following properties:
\begin{enumerate}[noitemsep]