\documentclass[10pt,portrait]{article}
\usepackage[portrait]{geometry}
\input{math_header}
% Format inherited from <MA1101R Cheatsheet 17/18 Sem 1 Finals>
% Original document is by Lee Yiyuan and Eugene Lim
%
% All the theorems are numbered according to <LINEAR ALGEBRA - Concepts and
% Techniques on Euclidean Space>, ISBN 978-981-3152-88-5, Second Edition (2016)
% -----------------------------------------------------------------------
\title{MA1101R Cheatsheet 19/20 Semester 1 Final}
\begin{document}
\begin{center}
{\large MA1101R Cheatsheet 19/20 Semester 1 Final}\\{by Howard Liu}
\end{center}
\footnotesize
\begin{multicols}{2}
\begin{justifying}
\setlength{\premulticols}{1pt}
\setlength{\postmulticols}{1pt}
\setlength{\multicolsep}{1pt}
\setlength{\columnsep}{2pt}
\section{Matrices}
\begin{namedthm*}{Theorem 1.2.7}
If the \textbf{augmented matrices} of two systems of linear equations are row equivalent, then the two systems have the same set of solutions. (\(\ast\) Even for two homogeneous linear systems, we must say that \(\begin{pmatrix}[c|c]\matr{A} & \matr{0}\end{pmatrix}\) is row equivalent to \(\begin{pmatrix}[c|c]\matr{B} & \matr{0}\end{pmatrix}\), not that \(\matr{A}\) is row equivalent to \(\matr{B}\).)
\end{namedthm*}
\begin{namedthm*}{Example 1.4.10}
Suppose the augmented matrix \(\matr{R}\) of a linear system (LS) is in (R)REF:
\begin{enumerate}
\item LS has no solution \\
\(\Leftrightarrow\) The last column of \(\matr{R}\) is a pivot column.
\item LS has exactly one solution \\
\(\Leftrightarrow\) \textbf{Only} the last column of \(\matr{R}\) is non-pivot.
\item LS has infinitely many solutions \\
\(\Leftrightarrow\) At least one column other than the last one is non-pivot \\
\(\Leftrightarrow\) Number of variables $>$ Number of non-zero rows in \(\matr{R}\) \\
(\(\ast\) \# non-pivot columns in (R)REF \(- 1 =\) \# parameters in the general solution)
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Theorem 6.1.8}
\(\matr{A}\) is invertible if and only if any of the following holds:
\begin{enumerate}
% T2.4.7
\item \(\exists \matr{B}\) s.t. \(\matr{AB} = \matr{I} \lor \matr{BA} = \matr{I}\)
\item Refer to \(\textbf{Theorem 2.4.7.2}\) below
\item \(\rref(\matr{A}) = \matr{I}\)
\item \(\matr{A}\) is a product of elementary matrices
% T2.5.19
\item \(\det(\matr{A}) \ne 0\)
% T3.6.11
\item The rows of \(\matr{A}\) form a basis of \(\mathbb{R}^n\)
\item The columns of \(\matr{A}\) form a basis of \(\mathbb{R}^n\)
% T6.1.8
\item 0 is not an eigenvalue of \(\matr{A}\)
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Remark 2.3.4 (Cancellation Laws for Matrices)}
Let \(\matr{A}\) be an invertible \(m \times m\) matrix,
\begin{enumerate}[label=(\alph*)]
\item If \(\matr{B}_1\) and \(\matr{B}_2\) are \(m \times n\) matrices with \(\matr{AB_1} = \matr{AB_2}\), then \(\matr{B}_1 = \matr{B}_2\)
\item If \(\matr{C}_1\) and \(\matr{C}_2\) are \(n \times m\) matrices with \(\matr{C_1A} = \matr{C_2A}\), then \(\matr{C}_1 = \matr{C}_2\)
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Theorem 2.4.7.2 (generalised)}
Relationship between the singularity of \(\matr{A}\) and the number of solutions of a linear system \(\matr{Ax} = \matr{b}\):
\begin{enumerate}
\item \(\matr{A}\) is singular \(\Leftrightarrow \matr{Ax} = \matr{b}\) has infinitely many solutions (the only possibility for a homogeneous LS) or no solution
\item \(\matr{A}\) is invertible \(\Leftrightarrow \matr{Ax} = \matr{b}\) has exactly one solution (the trivial solution, for a homogeneous LS)
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Definition 2.5.2}
Let \(\matr{A} = \left(a_{ij}\right)\) be an \(n \times n\) matrix. Let \(\matr{M}_{ij}\) be the \(\nobreak{(n - 1)\times (n - 1)}\) matrix obtained from \(\matr{A}\) by deleting the \(i\)th row and the \(j\)th column. Then the \textit{determinant} of \(\matr{A}\) is defined as
\[
\det(\matr{A}) =
\begin{cases}
a_{11} & \text{if \(n = 1\)} \\
a_{11}A_{11} + \cdots + a_{1n}A_{1n} & \text{if \(n > 1\)}
\end{cases}
\]
where
\[
A_{ij} = (-1)^{i + j} \det\left(\matr{M_{ij}}\right)
\]
The number \(A_{ij}\) is called the \((i, j)\)\textit{-cofactor} of \(\matr{A}\).
\end{namedthm*}
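\begin{namedthm*}{Worked Example}
A quick sanity check of the cofactor expansion above, on a \(2 \times 2\) matrix with values chosen purely for illustration: for \(\matr{A} = \begin{pmatrix}1 & 2\\3 & 4\end{pmatrix}\), the cofactors are \(A_{11} = (-1)^{1+1}\det(4) = 4\) and \(A_{12} = (-1)^{1+2}\det(3) = -3\), so
\[
\det(\matr{A}) = a_{11}A_{11} + a_{12}A_{12} = 1 \cdot 4 + 2 \cdot (-3) = -2
\]
which agrees with \(1 \cdot 4 - 2 \cdot 3\) from cross multiplication.
\end{namedthm*}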
\begin{namedthm*}{Theorem 2.5.8}
The determinant of a triangular matrix is equal to the product of its diagonal entries.
\end{namedthm*}
\begin{namedthm*}{Theorem 2.5.12 (added-on)}
The determinant of a square matrix is 0 when:
\begin{enumerate}
\item it has two identical rows, or
\item it has two identical columns, or
\item some row/column of its (R)REF is zero
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Theorem 2.5.15}
Let \(\matr{A}\) be a square matrix and let \(k\) be a non-zero constant.
\begin{enumerate}
\item \(\matr{A} \xrightarrow{k\vect{R}_i} \matr{B} \Rightarrow \det(\matr{B}) = k\det(\matr{A})\)
\item \(\matr{A} \xrightarrow{\vect{R}_i \leftrightarrow \vect{R}_j} \matr{B} \Rightarrow \det(\matr{B}) = -\det(\matr{A})\)
\item \(\matr{A} \xrightarrow{\vect{R}_i + k\vect{R}_j} \matr{B} \Rightarrow \det(\matr{B}) = \det(\matr{A})\)
\item Let \(\matr{E}\) be an elementary matrix of the same size as \(\matr{A}\). Then \(\det(\matr{EA}) = \det(\matr{E})\det(\matr{A})\).
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Remark 2.5.18}
Since \(\det(\matr{A}) = \det(\matr{A}^T)\), Theorem 2.5.15 still holds if ``rows'' are changed to ``columns''.
\end{namedthm*}
\begin{namedthm*}{Theorem 2.5.22}
Let \(\matr{A}\) and \(\matr{B}\) be two square matrices of order \(n\) and let \(c\) be a scalar. Then
\begin{enumerate}
\item \(\det(c\matr{A}) = c^n\det(\matr{A})\)
\item \(\det(\matr{AB}) = \det(\matr{A})\det(\matr{B})\)
\item if \(\matr{A}\) is invertible, \(\det(\matr{A}^{-1}) = \frac{1}{\det(\matr{A})}\)
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Definition 2.5.24}
Let \(\matr{A}\) be a square matrix of order \(n\). Then the \textit{(classical) adjoint} of \(\matr{A}\) is the \(n \times n\) matrix
\[
\adj(\matr{A}) = \left(A_{ij}\right)_{n \times n}^T
\]
where \(A_{ij}\) is the \((i, j)\)-cofactor of \(\matr{A}\).
\end{namedthm*}
\begin{namedthm*}{Theorem 2.5.27 (Cramer's Rule)}
Suppose \(\matr{A}\vect{x} = \vect{b}\) is a linear system where \(\matr{A}\) is an \(n \times n\) matrix. Let \(\matr{A_i}\) be the matrix obtained from \(\matr{A}\) by replacing the \(i\)th column of \(\matr{A}\) by \(\vect{b}\). If \(\matr{A}\) is invertible, then the system has exactly one solution
\[
\vect{x} = \frac{1}{\det(\matr{A})}\begin{pmatrix}\det\left(\matr{A_1}\right) \\ \vdots \\ \det\left(\matr{A_n}\right) \end{pmatrix}
\]
\end{namedthm*}
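\begin{namedthm*}{Worked Example}
A small illustration of Cramer's Rule (system chosen purely for illustration): for \(x_1 + 2x_2 = 5\), \(3x_1 + 4x_2 = 6\), we have \(\det(\matr{A}) = -2\),
\[
\det(\matr{A_1}) = \det\begin{pmatrix}5 & 2\\6 & 4\end{pmatrix} = 8, \quad
\det(\matr{A_2}) = \det\begin{pmatrix}1 & 5\\3 & 6\end{pmatrix} = -9
\]
so \(x_1 = \frac{8}{-2} = -4\) and \(x_2 = \frac{-9}{-2} = \frac{9}{2}\); substituting back confirms both equations.
\end{namedthm*}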
\begin{namedthm*}{Mixed Notes 1}
\(\matr{A}^{-1}\) can be computed by:
\begin{enumerate}
\item Find \(\matr{B}\) s.t. \(\matr{AB} = \matr{I} \lor \matr{BA} = \matr{I}\)
\item Find using \textbf{Theorem 2.5.25}: \(\matr{A}^{-1} = \frac{1}{\det(\matr{A})}\adj(\matr{A})\)
\item Find using: \(\begin{pmatrix}[c|c] \matr{A} & \matr{I}\end{pmatrix} \xrightarrow{GJE} \begin{pmatrix}[c|c] \matr{I} & \matr{A}^{-1}\end{pmatrix}\)
\end{enumerate}
\end{namedthm*}
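\begin{namedthm*}{Worked Example}
Method 2 above, checked on an illustrative \(2 \times 2\) matrix (for order 2, \(\adj\begin{pmatrix}a & b\\c & d\end{pmatrix} = \begin{pmatrix}d & -b\\-c & a\end{pmatrix}\)):
\[
\matr{A} = \begin{pmatrix}1 & 2\\3 & 4\end{pmatrix}, \quad
\matr{A}^{-1} = \frac{1}{-2}\begin{pmatrix}4 & -2\\-3 & 1\end{pmatrix} = \begin{pmatrix}-2 & 1\\\frac{3}{2} & -\frac{1}{2}\end{pmatrix}
\]
Multiplying \(\matr{A}\matr{A}^{-1}\) back out gives \(\matr{I}\), as expected.
\end{namedthm*}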
\begin{namedthm*}{Mixed Notes 2}
\(\det(\matr{A})\) can be computed by:
\begin{enumerate}
\item Using the cofactor expansion (\textbf{Definition 2.5.2})
\item Using cross multiplication (for \(2 \times 2\) and \(3 \times 3\) matrices only)
\item Doing some EROs (e.g. GE; consider \textbf{Theorem 2.5.15}) to make it triangular, then using \textbf{Theorem 2.5.8}, or to make it satisfy the properties in \textbf{Theorem 2.5.12}
\item Using \textbf{Theorem 2.5.22}
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Mixed Notes 3}
Some random notes:
\begin{enumerate}
\item In \(\mathbb{R}^n\) where \(n \ge 2\), a solution set with 1 parameter is a line and one with 2 parameters is a plane.
\item \(\matr{M}^2 + \matr{M} = \matr{I} \Rightarrow \matr{M}(\matr{M} + \textcolor{red}{\matr{I}}) = \matr{I}\) (Don't put that \(\matr{I}\) to be scalar 1!)
\item Two matrices have same RREF \(\Leftrightarrow\) They are row equivalent
\item In exams, express a matrix in the form \(\matr{A} = (a_{ij})_{m \times n}\); \textbf{DO NOT} use the dots form
\item When using the ERO \(\vect{R}_i = \frac{1}{k}\vect{R}_j\), discuss whether \(k\) is 0 when necessary
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Mixed Notes 4}
Generally, for (square) matrices \(\matr{A}\) and \(\matr{B}\),
\begin{enumerate}
\item \(\matr{AB} \ne \matr{BA}\)
\item \((\matr{AB})^2 \ne \matr{A}^2\matr{B}^2\)
\item \(\matr{AB} = \matr{0} \nRightarrow \matr{A} = \matr{0} \lor \matr{B} = \matr{0}\)
\item \(\matr{A}^2 = \matr{I} \nRightarrow \matr{A} = \pm \matr{I}\) (counterexample: an elementary matrix of a row-swapping ERO)
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Mixed Notes 5}
Expanding along a row (column) using the cofactors of a \textit{different} row (column) yields 0:
\[
\sum_{m=1}^n a_{im}A_{jm} = \sum_{m=1}^n a_{mi}A_{mj} = 0 \quad \text{for any } i \ne j
\]
\end{namedthm*}
\section{Euclidean Spaces}
\begin{namedthm*}{Discussion 3.2.5}
Given \(S = \{\vect{v_1}, \vect{v_2}, \dots, \vect{v_m}\} \subseteq \mathbb{R}^n\), to show \(\lspan(S) = \mathbb{R}^n\):
\medskip
\noindent
Consider \(\vect{v_i} = \left(v_{i1}, \dots, v_{in}\right)\),
\[
\begin{pmatrix}
v_{11} & \dots & v_{m1}\\
\vdots & \ddots & \vdots\\
v_{1n} & \dots & v_{mn}
\end{pmatrix} \xrightarrow{GE} \matr{R}
\]
\(\lspan(S) = \mathbb{R}^n \Leftrightarrow \matr{R}\) has no zero rows
\end{namedthm*}
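\begin{namedthm*}{Worked Example}
An illustrative use of this check (vectors chosen purely for illustration): for \(S = \{(1, 0, 1), (0, 1, 1), (1, 1, 2)\} \subseteq \mathbb{R}^3\),
\[
\begin{pmatrix}1 & 0 & 1\\0 & 1 & 1\\1 & 1 & 2\end{pmatrix}
\xrightarrow{\vect{R}_3 - \vect{R}_1,\ \vect{R}_3 - \vect{R}_2}
\begin{pmatrix}1 & 0 & 1\\0 & 1 & 1\\0 & 0 & 0\end{pmatrix}
\]
which has a zero row, so \(\lspan(S) \ne \mathbb{R}^3\) (indeed the third vector is the sum of the first two).
\end{namedthm*}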
\begin{namedthm*}{Theorem 3.2.7}
If \(|S| < n\), \(\lspan(S) \ne \mathbb{R}^n\).
\end{namedthm*}
\begin{namedthm*}{Theorem 3.2.10}
Let \(S_1 = \{\vect{u_1}, \dots, \vect{u_k}\}\) and \(S_2 = \{\vect{v_1}, \dots, \vect{v_m}\}\) be subsets of \(\mathbb{R}^n\). Then, \(\lspan(S_1) \subseteq \lspan(S_2) \Leftrightarrow \vect{u_i} \in \lspan\{\vect{v_1}, \dots, \vect{v_m}\}\) for all \(i = 1, 2, \dots, k\).
\end{namedthm*}
\begin{namedthm*}{Definition 3.3.2}
Let \(V\) be a subset of \(\mathbb{R}^n\). Then \(V\) is called a \textit{subspace} of \(\mathbb{R}^n\) if \(V = \lspan(S)\) where \(S = \{\vect{u_1}, \dots, \vect{u_k}\}\) for some vectors \(\vect{u_1}, \dots, \vect{u_k} \in \mathbb{R}^n \).
\medskip
\noindent
More precisely, \(V\) is called the \textit{subspace spanned} by \(S\) (or the \textit{subspace spanned} by \( \vect{u_1}, \dots, \vect{u_k} \)). We also say that \(S\) \textit{spans} (or \(\vect{u_1}, \dots, \vect{u_k}\) \textit{span}) the subspace \(V\).
\medskip
\noindent
Since \(V = \lspan(S) \Rightarrow \vect{0} \in V\), the contrapositive gives \(\vect{0} \notin V \Rightarrow V \ne \lspan(S)\). (\(\ast\) i.e., if \(\vect{0}\) is not in \(V\), then \(V\) is not a subspace of \(\mathbb{R}^n\))
\end{namedthm*}
\begin{namedthm*}{Theorem 3.3.6}
If \(V = \{\vect{x} \in \mathbb{R}^n \mid \matr{A}\vect{x} = \vect{0}\}\) for some \(m \times n\) matrix \(\matr{A}\), then \(V\) is a subspace of \(\mathbb{R}^n\).
\end{namedthm*}
\begin{namedthm*}{Remark 3.3.8}
Let \(V\) be a non-empty subset of \(\mathbb{R}^n\). Then \(V\) is a subspace of \(\mathbb{R}^n\) if and only if
\[
\text{for all } \vect{u}, \vect{v} \in V \text{ and } c, d\in \mathbb{R},\enspace c\vect{u} + d\vect{v} \in V
\]
(\(\ast\) This checks whether \(V\) is \textbf{closed} under addition and scalar multiplication)
\end{namedthm*}
\begin{namedthm*}{Definition 3.4.2/4}
Consider column vectors \(\vect{u_1}, \vect{u_2}, \dots, \vect{u_k}\). The set \(S = \{\vect{u_1}, \vect{u_2}, \dots, \vect{u_k}\}\) is \textbf{linearly independent} iff. any of the following holds:
\begin{enumerate}
% T3.4.2
\item \(\begin{pmatrix}\vect{u_1} & \vect{u_2} & \dots & \vect{u_k}\end{pmatrix}\vect{x} = \vect{0}\) has only the trivial solution.
% T3.4.4
\item No vector in \(S\) can be written as a linear combination of the other vectors in \(S\).
% From revision note of tutor
\item \(S\) is a subset of a \textbf{linearly independent} set.
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Definition 3.5.4/Theorem 3.6.7}
A set \(S\) is a basis of a vector space if:
\begin{enumerate}[label*=\arabic*.]
\item \(S \subseteq V\)
\item Any 2 of the 3 below:
\begin{enumerate}[label*=\arabic*.]
\item \(S\) is linearly independent
\item \(S\) spans \(V\)
\item \(|S| = \dim(V)\)
\end{enumerate}
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Definition 3.5.8}
Let \(S = \{\vect{u_1}, \vect{u_2}, \dots, \vect{u_k}\}\) be a basis for a vector space \(V\) and let \(\vect{v}\) be a vector in \(V\). By T3.5.7, \(\vect{v}\) can be expressed uniquely as a linear combination:
\[
\vect{v} = c_1\vect{u_1} + c_2\vect{u_2} + \dots + c_k\vect{u_k}
\]
Then we shall have the \textbf{coordinate vector} of \(\vect{v}\) relative to the basis \(S\) : \((\vect{v})_S = (c_1, c_2, \dots, c_k) \in \mathbb{R}^k\) (assuming vectors in \(S\) are in fixed order).
\end{namedthm*}
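\begin{namedthm*}{Worked Example}
A minimal coordinate-vector computation (basis and vector chosen purely for illustration): take \(S = \{(1, 1), (1, -1)\}\), a basis for \(\mathbb{R}^2\), and \(\vect{v} = (3, 1)\). Solving \(c_1(1, 1) + c_2(1, -1) = (3, 1)\) gives \(c_1 = 2\), \(c_2 = 1\), so \((\vect{v})_S = (2, 1)\).
\end{namedthm*}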
\begin{namedthm*}{Remark 3.5.10/Theorem 3.5.11}
Let \(S\) be a basis for a vector space \(V\),
\begin{enumerate}
\item \(\forall \vect{u}, \vect{v} \in V, \vect{u} = \vect{v} \Leftrightarrow (\vect{u})_S = (\vect{v})_S\)
\item Coordinate vectors are closed under scalar multiplication and addition
\item Let \(\vect{v_1}, \vect{v_2}, \dots, \vect{v_r} \in V\); they are LI iff. \((\vect{v_1})_S, (\vect{v_2})_S, \dots, (\vect{v_r})_S\) are LI
\item \(\lspan\{\vect{v_1}, \vect{v_2}, \dots, \vect{v_r}\} = V \Leftrightarrow \lspan\{(\vect{v_1})_S, (\vect{v_2})_S, \dots, (\vect{v_r})_S\} = \mathbb{R}^{|S|}\)
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Theorem 3.6.9}
Let \(U\) be a subspace of \(V\), then \(\dim(U) \le \dim(V)\). Furthermore, if \(U \ne V\), then \(\dim(U) < \dim(V)\).
\end{namedthm*}
\begin{namedthm*}{Definition 3.7.3}
Let \(S = \{\vect{u_1}, \vect{u_2}, \dots, \vect{u_k}\}\) and \(T\) be two bases for a vector space. The square matrix \(\matr{P} =\begin{pmatrix}{[\vect{u_1}]}_T & {[\vect{u_2}]}_T & \dots & {[\vect{u_k}]}_T\end{pmatrix}\) is called the \textbf{transition matrix} from \(S\) to \(T\).
\end{namedthm*}
\begin{namedthm*}{Mixed Theorem 6}
Let \(S\) and \(T\) be two bases for a vector space \(V\) and let \(\matr{P}\) be the transition matrix from \(S\) to \(T\). If \(\matr{A}\) and \(\matr{B}\) are the matrices with the elements of \(S\) and \(T\) respectively as columns, we have \(\matr{BP} = \matr{A}\).
\end{namedthm*}
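\begin{namedthm*}{Sketch for Mixed Theorem 6}
A one-line justification of \(\matr{BP} = \matr{A}\): the \(i\)th column of \(\matr{BP}\) is \(\matr{B}[\vect{u_i}]_T\), and multiplying the coordinate vector \([\vect{u_i}]_T\) by the matrix of basis vectors \(\matr{B}\) reassembles \(\vect{u_i}\), i.e.\ the \(i\)th column of \(\matr{A}\). In particular, when \(T\) is a basis for \(\mathbb{R}^n\) (so \(\matr{B}\) is square and invertible), \(\matr{P} = \matr{B}^{-1}\matr{A}\).
\end{namedthm*}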
\begin{namedthm*}{Mixed Theorem 7}
EROs preserve the row space \textbf{(T4.1.7)}, and we have:
\begin{itemize}
\item \textbf{(R4.1.9)} Let \(\matr{R}\) be the RREF of \(\matr{A}\). The non-zero rows of \(\matr{R}\) form a basis of the row space of \(\matr{A}\).
\item \textbf{(T4.2.1)} Row space and column space of a matrix have the same dimension.
\end{itemize}
\end{namedthm*}
\begin{namedthm*}{Remark 4.2.5}
Regarding rank(\(\matr{A}\)):
\begin{enumerate}
\item For an \(m \times n\) matrix \(\matr{A}\), \(\rank(\matr{A}) \le \min\{m, n\}\). If \(\rank(\matr{A}) = \min\{m, n\}\), \(\matr{A}\) is said to have \textbf{full rank}.
\item A square matrix \(\matr{A}\) has full rank iff. it is invertible.
\item \(\rank(\matr{A}) = \rank(\matr{A^T})\).
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Theorem 4.3.6}
Suppose the linear system \(\matr{A}\vect{x} = \vect{b}\) has a solution \(\vect{v}\). Then the solution set of this system is given by:
\[
M = \{ \vect{u} + \vect{v} \mid \vect{u} \in \text{nullspace}(\matr{A})\}
\]
\end{namedthm*}
\section{Orthogonality}
\begin{namedthm*}{Definition 5.1.2.3/4}
For two vectors \(\vect{u}\) and \(\vect{v}\):
\(d(\vect{u}, \vect{v}) = \norm{\vect{u} - \vect{v}}\).
The angle between \(\vect{u}\) and \(\vect{v}\) is:
\[
\cos^{-1}\left(\frac{\vect{u}\cdot\vect{v}}{\norm{\vect{u}}\norm{\vect{v}}}\right)
\]
\end{namedthm*}
\begin{namedthm*}{Theorem 5.2.4}
If \(S\) is an orthogonal set of non-zero vectors in a vector space, \(S\) is \textbf{LI}.
\end{namedthm*}
\begin{namedthm*}{Theorem 5.2.8}
Consider \(S = \{\vect{u_1}, \vect{u_2}, \dots, \vect{u_k}\}\), a basis for a vector space \(V\); then for any vector \(\vect{w}\) in \(V\):
\begin{enumerate}
\item If \(S\) is orthogonal, we have
\[
(\vect{w})_S = \left(\frac{\vect{w}\cdot\vect{u_1}}{\vect{u_1}\cdot\vect{u_1}}, \frac{\vect{w}\cdot\vect{u_2}}{\vect{u_2}\cdot\vect{u_2}}, \dots, \frac{\vect{w}\cdot\vect{u_k}}{\vect{u_k}\cdot\vect{u_k}}\right)
\]
\item If \(S\) is orthonormal, we have
\[
(\vect{w})_S = (\vect{w}\cdot\vect{u_1}, \vect{w}\cdot\vect{u_2}, \dots, \vect{w}\cdot\vect{u_k})
\]
\end{enumerate}
\textbf{T5.2.15}: if \(\vect{w} \in \mathbb{R}^n\) and \(V\) is a subspace of \(\mathbb{R}^n\) with orthogonal basis \(S\), then \(\sum_{i=1}^{k}\frac{\vect{w}\cdot\vect{u_i}}{\vect{u_i}\cdot\vect{u_i}}\vect{u_i}\) is the projection of \(\vect{w}\) onto \(V\) (here \(\vect{w}\) need not lie in \(V\), but the same coefficients apply).
\end{namedthm*}
\begin{namedthm*}{Theorem 5.2.19 (Gram-Schmidt Process)}
Let \(\{\vect{u_1}, \vect{u_2}, \dots, \vect{u_k}\}\) be a basis for a vector space \(V\). Let
\begin{gather*}
\vect{v_1} = \vect{u_1},\\
\vect{v_2} = \vect{u_2} - \frac{\vect{u_2}\cdot\vect{v_1}}{\vect{v_1}\cdot\vect{v_1}}\vect{v_1},\\
\vect{v_3} = \vect{u_3} - \frac{\vect{u_3}\cdot\vect{v_1}}{\vect{v_1}\cdot\vect{v_1}}\vect{v_1} - \frac{\vect{u_3}\cdot\vect{v_2}}{\vect{v_2}\cdot\vect{v_2}}\vect{v_2},\\
\vdots
\end{gather*}
Then \(\{\vect{v_1}, \vect{v_2}, \dots, \vect{v_k}\}\) is an orthogonal basis for \(V\). Normalizing every vector in it then gives an orthonormal basis for \(V\).
\end{namedthm*}
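\begin{namedthm*}{Worked Example}
One step of the process on illustrative vectors: with \(\vect{u_1} = (1, 1, 0)\) and \(\vect{u_2} = (1, 0, 1)\),
\[
\vect{v_1} = (1, 1, 0), \quad
\vect{v_2} = (1, 0, 1) - \frac{1}{2}(1, 1, 0) = \left(\tfrac{1}{2}, -\tfrac{1}{2}, 1\right)
\]
and indeed \(\vect{v_1} \cdot \vect{v_2} = 0\); normalizing gives \(\frac{1}{\sqrt{2}}(1, 1, 0)\) and \(\frac{1}{\sqrt{6}}(1, -1, 2)\).
\end{namedthm*}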
\begin{namedthm*}{Definition 5.3.6}
Let \(\matr{A}\vect{x} = \vect{b}\) be a linear system where \(\matr{A}\) is an \(m \times n\) matrix. A vector \(\vect{u} \in \mathbb{R}^n\) is called a \textbf{least squares solution} to the linear system if \(\forall \vect{v} \in \mathbb{R}^n, \norm{\vect{b} - \matr{A}\vect{u}} \le \norm{\vect{b} - \matr{A}\vect{v}}\).
\end{namedthm*}
\begin{namedthm*}{Theorem 5.3.8}
Continuing \textbf{D5.3.6}, let \(\vect{p}\) be the projection of \(\vect{b}\) onto the column space of \(\matr{A}\). Then \(\vect{u}\) is a least squares solution iff. \(\matr{A}\vect{u} = \vect{p}\).
\end{namedthm*}
\begin{namedthm*}{Theorem 5.3.10}
Continuing \textbf{D5.3.6}, \(\vect{u}\) is a least squares solution iff. \(\vect{u}\) is a solution to \(\matr{A}^T\matr{A}\vect{x} = \matr{A}^T\vect{b}\).
\end{namedthm*}
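\begin{namedthm*}{Worked Example}
The normal equations on an illustrative inconsistent system: \(\matr{A} = \begin{pmatrix}1 & 0\\0 & 1\\1 & 1\end{pmatrix}\), \(\vect{b} = \begin{pmatrix}1\\1\\1\end{pmatrix}\) gives
\[
\matr{A}^T\matr{A} = \begin{pmatrix}2 & 1\\1 & 2\end{pmatrix}, \quad
\matr{A}^T\vect{b} = \begin{pmatrix}2\\2\end{pmatrix}
\]
so the least squares solution is \(\vect{u} = \left(\tfrac{2}{3}, \tfrac{2}{3}\right)\).
\end{namedthm*}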
\begin{namedthm*}{D5.4.3/R5.4.4/T5.4.6}
\(\matr{A}\) is a square matrix of order \(n\). The following are equivalent:
\begin{enumerate}
\item \(\matr{A}\) is orthogonal
\item \(\matr{A}^{-1} = \matr{A}^T\)
\item \(\matr{A}\matr{A}^T = \matr{A}^T\matr{A} = \matr{I}\)
\item The rows of \(\matr{A}\) form an \textbf{orthonormal} basis for \(\mathbb{R}^n\)
\item The columns of \(\matr{A}\) form an \textbf{orthonormal} basis for \(\mathbb{R}^n\)
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Theorem 5.4.7}
Let \(S\) and \(T\) be two \textbf{orthonormal} bases for a vector space and let \(\matr{P}\) be the transition matrix from \(S\) to \(T\). Then \(\matr{P}\) is orthogonal and \(\matr{P}^T\) is the transition matrix from \(T\) to \(S\).
\end{namedthm*}
\section{Diagonalization}
\begin{namedthm*}{Definition 6.1.3}
\(\matr{A}\) is a square matrix of order \(n\), and \(\vect{u} \in \mathbb{R}^n\) is a non-zero column vector that satisfies:
\[
\matr{A}\vect{u} = \lambda \vect{u}
\]
for some scalar \(\lambda\). \(\lambda\) is called an \textbf{eigenvalue} of \(\matr{A}\). \(\vect{u}\) is said to be an \textbf{eigenvector} of \(\matr{A}\) \textbf{associated} with the eigenvalue \(\lambda\).
\end{namedthm*}
\begin{namedthm*}{Theorem 6.1.9}
If \(\matr{A}\) is triangular, the eigenvalues of \(\matr{A}\) are the diagonal entries of \(\matr{A}\).
\end{namedthm*}
\begin{namedthm*}{Remark 6.2.5}
Suppose the characteristic polynomial of the matrix \(\matr{A}\) can be factorized as
\[
\det(\lambda\matr{I} - \matr{A}) = (\lambda - \lambda_1)^{r_1}(\lambda - \lambda_2)^{r_2}\dots(\lambda - \lambda_k)^{r_k}
\]
where \(\lambda_1, \lambda_2, \dots, \lambda_k\) are distinct eigenvalues of \(\matr{A}\). Then for each eigenvalue \(\lambda_i\),
\[
\dim(E_{\lambda_i}) \le r_i
\]
Furthermore, \(\matr{A}\) is diagonalizable iff. \(\forall 1 \le i \le k, \dim(E_{\lambda_i}) = r_i\).
\end{namedthm*}
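\begin{namedthm*}{Worked Example}
An illustrative check of this remark: for \(\matr{A} = \begin{pmatrix}1 & 2\\2 & 1\end{pmatrix}\),
\[
\det(\lambda\matr{I} - \matr{A}) = (\lambda - 1)^2 - 4 = (\lambda - 3)(\lambda + 1)
\]
Both eigenvalues have \(r_i = 1\), and \(\dim(E_3) = \dim(E_{-1}) = 1\) (spanned by \((1, 1)\) and \((1, -1)\) respectively), so \(\matr{A}\) is diagonalizable with \(\matr{P} = \begin{pmatrix}1 & 1\\1 & -1\end{pmatrix}\) and \(\matr{P}^{-1}\matr{A}\matr{P} = \begin{pmatrix}3 & 0\\0 & -1\end{pmatrix}\).
\end{namedthm*}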
\begin{namedthm*}{Definition 6.3.2/T*.4}
A square matrix \(\matr{A}\) is said to be orthogonally diagonalizable iff. there exists an orthogonal matrix \(\matr{P}\) such that \(\matr{P}^T\matr{A}\matr{P}\) is diagonal.
A square matrix is orthogonally diagonalizable iff. it is \textbf{symmetric}.
\end{namedthm*}
\begin{namedthm*}{Algorithm 6.3.5}
Similar to the process for ordinary diagonalization, the orthogonal matrix \(\matr{P}\) can be found by using the vectors of \(T\) as \textbf{its columns}, where \(T = T_{\lambda_1} \cup T_{\lambda_2} \cup \dots \cup T_{\lambda_k}\) and each \(T_{\lambda_i}\) is obtained from \(S_{\lambda_i}\) (a basis for the eigenspace \(E_{\lambda_i}\)) using the Gram-Schmidt Process.
\end{namedthm*}
\section{Linear Transformation}
\begin{namedthm*}{Theorem 7.1.4}
Let \(T\) be a linear transformation, we have:
\begin{enumerate}
\item \(T(\vect{0}) = \vect{0}\)
\item \(T\) preserves addition and scalar multiplication: \(T(c\vect{u} + d\vect{v}) = cT(\vect{u}) + dT(\vect{v})\)
\end{enumerate}
\end{namedthm*}
\begin{namedthm*}{Discussion 7.1.8}
Let \(T: \mathbb{R}^n \rightarrow \mathbb{R}^m\) be a linear transformation with the standard matrix \(\matr{A}\). Let \(\{\vect{e_1}, \vect{e_2}, \dots, \vect{e_n}\}\) be the standard basis for \(\mathbb{R}^n\). We then have:
\[
\matr{A} =
\begin{pmatrix}
T(\vect{e_1}) & T(\vect{e_2}) & \dots & T(\vect{e_n})
\end{pmatrix}
\]
\end{namedthm*}
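\begin{namedthm*}{Worked Example}
An illustrative standard matrix (the map is chosen purely for illustration): for \(T: \mathbb{R}^2 \rightarrow \mathbb{R}^3\) defined by \(T(x, y) = (x + y, x - y, 2y)\),
\[
\matr{A} = \begin{pmatrix}T(\vect{e_1}) & T(\vect{e_2})\end{pmatrix} = \begin{pmatrix}1 & 1\\1 & -1\\0 & 2\end{pmatrix}
\]
since \(T(\vect{e_1}) = (1, 1, 0)\) and \(T(\vect{e_2}) = (1, -1, 2)\).
\end{namedthm*}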
\begin{namedthm*}{Theorem 7.2.4}
Continuing \textbf{D7.1.8}. We have:
\[
R(T) = \lspan\{T(\vect{e_1}), T(\vect{e_2}), \dots, T(\vect{e_n})\} = \text{the column space of }\matr{A}
\]
which is a subspace of \(\mathbb{R}^m\).
\end{namedthm*}
\begin{namedthm*}{D7.2.5/T7.2.9/D7.2.10/T7.2.12}
Continuing \textbf{T7.2.4}. We have:
\begin{itemize}
\item \(\rank(T) = \dim(R(T)) = \rank(\matr{A})\)
\item \(\nullity(T) = \nullity(\matr{A})\)
\item \(\rank(T) + \nullity(T) = n\)
\item \(\ker(T) = \text{the nullspace of }\matr{A}\)
\end{itemize}
\end{namedthm*}
\end{justifying}
\end{multicols}
\end{document}