\documentclass{article}%
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{graphicx}%
\setcounter{MaxMatrixCols}{30}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.00.0.2552}
%TCIDATA{CSTFile=40 LaTeX article.cst}
%TCIDATA{Created=Tuesday, August 18, 2015 14:51:12}
%TCIDATA{LastRevised=Monday, October 26, 2015 11:08:21}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{Language=American English}
\newtheorem{theorem}{Theorem}
\newtheorem{acknowledgement}[theorem]{Acknowledgement}
\newtheorem{algorithm}[theorem]{Algorithm}
\newtheorem{axiom}[theorem]{Axiom}
\newtheorem{case}[theorem]{Case}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{conclusion}[theorem]{Conclusion}
\newtheorem{condition}[theorem]{Condition}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{criterion}[theorem]{Criterion}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{notation}[theorem]{Notation}
\newtheorem{problem}[theorem]{Problem}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{solution}[theorem]{Solution}
\newtheorem{summary}[theorem]{Summary}
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\begin{document}

\title{Math 413/513 Chapter 4 (from Friedberg, Insel, \& Spence)}
\author{David Glickenstein}
\maketitle

\section{Determinants}

\subsection{Definitions and main results}

\begin{definition}
Let $A=\left( A_{ij}\right) \in F^{n\times n}.$ The \emph{determinant} $\det\left( A\right)$ is a scalar defined recursively as $\det\left( A\right) =A_{11}$ if $n=1$ and if $n\geq2,$%
$\det A=\sum_{j=1}^{n}\left( -1\right) ^{1+j}A_{1j}\det\left( \tilde{A}_{1j}\right)$ where $\tilde{A}_{ij}\in F^{\left( n-1\right) \times\left( n-1\right) }$ is the matrix obtained from $A$ by deleting the $i$th row and $j$th column. 
Sometimes $\det A$ is denoted as $\left\vert A\right\vert .$ The scalar $\left( -1\right) ^{i+j}\det\left( \tilde{A}_{ij}\right)$ is called the $i,j$ \emph{cofactor} of $A,$ so the determinant is the sum of the entries of the first row, each multiplied by its cofactor.
\end{definition}

We note that in the $2\times2$ case, we have $\det\left(
\begin{array}
[c]{cc}%
a & b\\
c & d
\end{array}
\right) =ad-bc.$

\begin{theorem}
\label{thm:det linear}The determinant is a linear function of each row if the other rows are held fixed. That is, for any $r$ between $1$ and $n,$%
$\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{r-1}\\
u+kv\\
a_{r+1}\\
\vdots\\
a_{n}%
\end{array}
\right) =\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{r-1}\\
u\\
a_{r+1}\\
\vdots\\
a_{n}%
\end{array}
\right) +k\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{r-1}\\
v\\
a_{r+1}\\
\vdots\\
a_{n}%
\end{array}
\right)$ for $k$ a scalar and $a_{j},u,v$ row vectors.
\end{theorem}

Note that this tells us how the determinant is affected by one type of elementary row operation.

\begin{corollary}
\label{cor:row of zeroes}If $A$ has a row of all zeroes, then $\det A=0.$
\end{corollary}

\begin{theorem}
\label{thm:cofactor expansion}The determinant can be defined by a cofactor expansion in any row, i.e., for any $i,$%
$\det A=\sum_{j=1}^{n}\left( -1\right) ^{i+j}A_{ij}\det\left( \tilde{A}_{ij}\right) .$
\end{theorem}

\begin{corollary}
\label{cor:identical rows}If $A$ has two identical rows, then $\det A=0.$
\end{corollary}

\begin{theorem}
\label{thm:exchange rows}If $B$ is obtained from $A$ by exchanging any two rows, $\det B=-\det A.$
\end{theorem}

\begin{theorem}
\label{thm:det row op type 3}Let $B$ be obtained from $A$ by adding a multiple of one row to another. 
Then $\det B=\det A.$
\end{theorem}

\begin{corollary}
\label{cor: lower rank}If $A\in F^{n\times n}$ has $\operatorname{rank}A<n,$ then $\det A=0.$
\end{corollary}

\begin{theorem}
\label{thm:product}For any $A,B\in F^{n\times n},$ $\det\left( AB\right) =\left( \det A\right) \left( \det B\right) .$
\end{theorem}

\begin{corollary}
\label{cor:invertible}A matrix $A\in F^{n\times n}$ is invertible if and only if $\det A\neq0,$ in which case $\det\left( A^{-1}\right) =\frac{1}{\det A}.$
\end{corollary}

\begin{theorem}
\label{thm: transpose}For any $A\in F^{n\times n},$ $\det\left( A^{T}\right) =\det A.$
\end{theorem}

\subsection{Proofs}

\begin{proof}
[Proof of Theorem \ref{thm:det linear}]We induct on $n.$ Suppose row $r$ of $A$ is $u+kv,$ and let $B$ and $C$ denote the matrices that agree with $A$ in every row except row $r,$ where $B$ has the row $u$ and $C$ has the row $v.$ If $r>1,$ then the inductive hypothesis applies to each $\tilde{A}_{1j},$ and since the first rows of $A,$ $B,$ and $C$ agree, we get
\begin{align*}
\det A & =\sum_{j=1}^{n}\left( -1\right) ^{1+j}A_{1j}\det\left( \tilde{A}_{1j}\right) \\
& =\sum_{j=1}^{n}\left( -1\right) ^{1+j}A_{1j}\left( \det\tilde{B}_{1j}+k\det\tilde{C}_{1j}\right) \\
& =\sum_{j=1}^{n}\left( -1\right) ^{1+j}B_{1j}\det\tilde{B}_{1j}+k\sum_{j=1}^{n}\left( -1\right) ^{1+j}C_{1j}\det\tilde{C}_{1j}\\
& =\det B+k\det C.
\end{align*}
We leave the case of $r=1$ as an exercise.
\end{proof}

\begin{proof}
[Proof of Corollary \ref{cor:row of zeroes}]Exercise.
\end{proof}

\begin{proof}
[Proof of Theorem \ref{thm:cofactor expansion}]We do induction on $n.$ The base case is easy. Notice that since we can expand in any row in $\tilde{A}_{ij},$ we can compute the following if we let $\tilde{A}_{ij,k\ell}$ be the matrix obtained by removing the $i$th and $k$th rows and $j$th and $\ell$th columns from $A.$%
\begin{align*}
\det A & =\sum_{j=1}^{n}\left( -1\right) ^{1+j}A_{1j}\det\left( \tilde{A}_{1j}\right) \\
& =\sum_{j=1}^{n}\left( -1\right) ^{1+j}A_{1j}\left( \sum_{k=1}^{j-1}\left( -1\right) ^{k+i-1}A_{ik}\det\left( \tilde{A}_{1j,ik}\right) +\sum_{k=j+1}^{n}\left( -1\right) ^{k+i}A_{ik}\det\left( \tilde{A}_{1j,ik}\right) \right) \\
& =\sum_{j=1}^{n}\sum_{k=1}^{j-1}\left( -1\right) ^{j+k+i}A_{1j}A_{ik}\det\left( \tilde{A}_{1j,ik}\right) +\sum_{j=1}^{n}\sum_{k=j+1}^{n}\left( -1\right) ^{j+k+i+1}A_{1j}A_{ik}\det\left( \tilde{A}_{1j,ik}\right) \\
& =\sum_{k<j}\left( -1\right) ^{j+k+i}A_{1j}A_{ik}\det\left( \tilde{A}_{1j,ik}\right) +\sum_{k>j}\left( -1\right) ^{j+k+i+1}A_{1j}A_{ik}\det\left( \tilde{A}_{1j,ik}\right)
\end{align*}
Similarly,%
\begin{align*}
& \sum_{k=1}^{n}\left( -1\right) ^{i+k}A_{ik}\det\left( \tilde{A}_{ik}\right) \\
\;\;\; & =\sum_{k=1}^{n}\left( -1\right) ^{i+k}A_{ik}\left( \sum_{j=1}^{k-1}\left( -1\right) ^{1+j}A_{1j}\det\left( \tilde{A}_{ik,1j}\right) +\sum_{j=k+1}^{n}\left( -1\right) ^{1+j-1}A_{1j}\det\left( \tilde{A}_{ik,1j}\right) \right) \\
& 
=\sum_{k=1}^{n}\sum_{j=1}^{k-1}\left( -1\right) ^{1+j+k+i}A_{ik}A_{1j}\det\left( \tilde{A}_{ik,1j}\right) +\sum_{k=1}^{n}\sum_{j=k+1}^{n}\left( -1\right) ^{j+k+i}A_{ik}A_{1j}\det\left( \tilde{A}_{ik,1j}\right) \\
& =\sum_{j<k}\left( -1\right) ^{1+j+k+i}A_{ik}A_{1j}\det\left( \tilde{A}_{ik,1j}\right) +\sum_{j>k}\left( -1\right) ^{j+k+i}A_{ik}A_{1j}\det\left( \tilde{A}_{ik,1j}\right) .
\end{align*}
Since $\tilde{A}_{1j,ik}=\tilde{A}_{ik,1j},$ the two expansions agree term by term, and hence the cofactor expansion in row $i$ also computes $\det A.$
\end{proof}

\begin{proof}
[Proof of Corollary \ref{cor:identical rows}]We leave this as an exercise if $n\leq2.$ For $n\geq3,$ if we assume rows $1$ and $j$ are the same, then we can expand in a row other than those two rows. We see that each of the determinants in the expansion also has two identical rows, and by induction these determinants are all zero. Hence the determinant is zero.
\end{proof}

\begin{proof}
[Proof of Theorem \ref{thm:exchange rows}]Let the rows of $A$ be labeled $a_{1},\ldots,a_{n}.$ We see that by linearity in the rows we get%
$0=\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{i}+a_{j}\\
\vdots\\
a_{i}+a_{j}\\
\vdots\\
a_{n}%
\end{array}
\right) =\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{i}\\
\vdots\\
a_{i}\\
\vdots\\
a_{n}%
\end{array}
\right) +\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{i}\\
\vdots\\
a_{j}\\
\vdots\\
a_{n}%
\end{array}
\right) +\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{j}\\
\vdots\\
a_{i}\\
\vdots\\
a_{n}%
\end{array}
\right) +\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{j}\\
\vdots\\
a_{j}\\
\vdots\\
a_{n}%
\end{array}
\right) .$ The first equality holds by Corollary \ref{cor:identical rows}, since the matrix on the left has two identical rows. The first and last determinants on the right are zero for the same reason, and so if $A^{\prime}$ is obtained by exchanging rows $i$ and $j,$ then $\det A+\det A^{\prime}=0.$
\end{proof}

\begin{proof}
[Proof of Theorem \ref{thm:det row op type 3}]By linearity in the rows, we get that
$\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{i}+ka_{j}\\
\vdots\\
a_{j}\\
\vdots\\
a_{n}%
\end{array}
\right) =\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{i}\\
\vdots\\
a_{j}\\
\vdots\\
a_{n}%
\end{array}
\right) +k\det\left(
\begin{array}
[c]{c}%
a_{1}\\
\vdots\\
a_{j}\\
\vdots\\
a_{j}\\ \vdots\\ a_{n}% \end{array} \right) =\det A+0$ since the last matrix has two of the same rows. \end{proof} \begin{proof} [Proof of Corollary \ref{cor: lower rank}]We now know how row operations affect the calculation of the determinant. If the rank is less than $n,$ we can perform row operations to get a row of all zeros. This matrix will have determinant zero and the row operations will all show that the determinant of $A$ is still zero. \end{proof} \begin{proof} [Proof of Theorem \ref{thm:product}]We know that if $A$ or $B$ is not rank $n,$ then its determinant is zero. Also, if $A$ or $B$ is not rank $n,$ $AB$ is not rank $n$ (why? Show $L_{A}$ is not onto implies $L_{AB}$ is not onto, and $L_{B}$ is not one-to-one implies $L_{AB}$ is not one-to-one). Hence the theorem is true if $A$ or $B$ is not rank $n.$ If both $A$ and $B$ are rank $n,$ so is $AB.$ Using what we know about how row operations affect determinants, we can easily see that for any matrix $C$ and elementary matrix $E,$ $\det\left( EC\right) =\left( \det E\right) \left( \det C\right) .$ Since any invertible matrix is a product of elementary matrices, we can show that $\det\left( AB\right) =\det\left( E_{k}\cdots E_{1}B\right) =\det\left( E_{k}\cdots E_{1}\right) \left( \det B\right) =\left( \det A\right) \left( \det B\right) .$ \end{proof} \begin{proof} [Proof of Corollary \ref{cor:invertible}]If $A$ is not invertible, then it has rank less than $n$ and so $\det A=0.$ If $A$ is invertible, then $1=\det I=\det\left( A^{-1}A\right) =\left( \det A^{-1}\right) \left( \det A\right)$ so $\det A\neq0$ and $\det A^{-1}=\frac{1}{\det A}.$ \end{proof} \begin{proof} [Proof of Theorem \ref{thm: transpose}]If $A$ is not invertible, then neither is $A^{T},$ and so $\det A^{T}=\det A=0.$ If $A$ is invertible, then $A=E_{1}\cdots E_{k}$ for some elementary matrices. 
We then have that $A^{T}=E_{k}^{T}\cdots E_{1}^{T}.$ We now see that $\det A^{T}=\left( \det E_{k}^{T}\right) \cdots\det\left( E_{1}^{T}\right)$ and the result follows from checking that $\det E_{k}^{T}=\det E_{k}$ for each type of elementary matrix.
\end{proof}

\subsection{Remarks on determinants as volumes}

If $v_{1},\ldots,v_{n}$ are vectors in $\mathbb{R}^{n},$ then it turns out that $\det A,$ where the rows of $A$ are the vectors $v_{1},\ldots,v_{n},$ is equal to $\pm$ the volume of the parallelepiped determined by the vectors. Notice that this is zero if the vectors form a degenerate parallelepiped (lower dimensional), which is geometrically the same as saying the vectors are linearly dependent. The sign has to do with the ordering of the vectors, and obeys the right-hand rule for $n=2,3,$ and gives a way of defining an analogue of the right-hand rule in higher dimensions. This is called a choice of orientation and is important in algebraic topology and differential geometry.

\section{Problems}

\begin{itemize}
\item FIS Section 4.1 exercises 2, 3, 6, 7, 10

\item FIS Section 4.2 exercises 3, 5--25, 27, 29

\item FIS Section 4.3 exercises 9--13, 17, 21, 28
\end{itemize}

\section{Characterization of the determinant (Comprehensive/Graduate option)}

\begin{definition}
A function $\delta:F^{n\times n}\rightarrow F$ is called an $n$\emph{-linear function} if it is a linear function of each row when the remaining rows are held fixed.
\end{definition}

\begin{definition}
An $n$-linear function $\delta:F^{n\times n}\rightarrow F$ is called \emph{alternating} if $\delta\left( A\right) =0$ whenever two rows are identical.
\end{definition}

Note that the determinant satisfies both of these properties. In fact, it is essentially the only such function. 
\begin{theorem}
\label{thm:determinant characterization}Let $\delta:F^{n\times n}\rightarrow F$ be an alternating $n$-linear function such that $\delta\left( I\right) =1.$ Then $\delta\left( A\right) =\det A$ for all $A\in F^{n\times n}.$
\end{theorem}

\begin{proof}
[Proof (sketch)]If you carefully look at our proofs from the previous section, all we used is $n$-linearity and the alternating property to get the characterization of what row operations do. It then follows that $\delta\left( A\right) =0$ if $\operatorname{rank}A