%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% %%
%% Preamble of document %%
%% %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Sets up the standard "article" document class
\documentclass[12pt]{article}
% Loads packages
% Note that I typically load all packages at once (as below).
% Sometimes you must load them in a certain order...
\usepackage{amsmath,
amsfonts,
amssymb,
amsthm,
enumerate,
enumitem,
geometry,
mleftright,
nicefrac,
mathtools,
xparse,
ifthen,
tikz,
pgfplots,
floatrow,
multicol,
caption,
xurl,
relsize,
bbm
}
% Set up PGF plots (used for graphing)
\pgfplotsset{compat=newest}
% Fixes spacing caused by using "\left" or "\right"
\mleftright
% Sets the margins of the document.
% One can do this using plain TeX, but why?
\geometry{margin=1in}
% The next few lines set up my use of the cleveref package.
% This provides a "smarter" cross-referencing style
\usepackage[colorlinks=true]{hyperref}
\usepackage[sort,capitalise]{cleveref}
\newcommand{\creflastconjunction}{, and\nobreakspace}
\crefname{enumi}{item}{items}
% My personal command macros that I use.
\newcommand{\R}{\mathbb{R}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\dpp}{\text{.}}
\newcommand{\dc}{\text{,}}
\newcommand{\dd}{{\rm d}}
\newcommand{\dx}{\, {\rm d}}
\newcommand{\lr}{\ensuremath{\mkern-1.5mu}}
\newcommand{\induct}{\dashrightarrow}
\newcommand{\with}{\curvearrowleft}
\newcommand{\medcup}[1]{{\raisebox{0.25ex}{$\mathsmaller{\ensuremath{\bigcup}}_{#1}$}}}
% Creates my paired math delimiters for easy use.
\DeclarePairedDelimiter{\pr}{(}{)}
\DeclarePairedDelimiter{\br}{[}{]}
\DeclarePairedDelimiter{\cu}{\{}{\}}
\DeclarePairedDelimiter{\abs}{\lvert}{\rvert}
\DeclarePairedDelimiter{\norm}{\lVert}{\rVert}
\DeclarePairedDelimiter{\vt}{\langle}{\rangle}
\DeclarePairedDelimiter{\floor}{\lfloor}{\rfloor}
% Creates desired header and footer for document.
% Note that it uses the adaptively created date macro.
\setlength{\headheight}{15pt}
\usepackage{fancyhdr}
\pagestyle{fancy}
\fancyhf{}
\fancyhead[R]{}
\cfoot{}
\lfoot{\small{\textsc{Dissemination prohibited. \today}}}
\rfoot{\thepage}
% Updates equation and figure numbering.
\counterwithin{equation}{section}
\counterwithin{figure}{section}
% Defines theorem-like environments
\theoremstyle{definition}
\newtheorem{definition}[equation]{Definition}
\newtheorem{setting}[equation]{Setting}
\newtheorem{theorem}[equation]{Theorem}
\newtheorem{lemma}[equation]{Lemma}
\newtheorem{problem}[equation]{Problem}
\newtheorem{remark}[equation]{Remark}
\newtheorem{assumption}[equation]{Assumption}
\newtheorem{example}[equation]{Example}
\renewcommand{\thetheorem}{\theenumi\arabic{theorem}}
%%%%%%%%%%%%%%%%%%%%%%%%
%%% Automatic proof headers and endings, local labels
%%%%%%%%%%%%%%%%%%%%%%%%
\ExplSyntaxOn
\NewDocumentEnvironment {athm} {m m} {%
\phantomsection
\addcontentsline{toc}{subsection}{\texorpdfstring{\cref{#2}}{\ref*{#2}}}
\begin{#1}\label{#2}\global\def\loc{#2}%
}{%
\end{#1}%
}
\NewDocumentEnvironment{aproof} {} {%
\begin{proof}[Proof~of~\cref{\loc}]%
}{%
\finishproofthus
\end{proof}%
}
\ExplSyntaxOff
\newcommand{\eqqref}[1]{\cref{eq:\loc.#1}}
\newcommand{\eqlabel}[1]{\label{eq:\loc.#1}}
\newcommand{\finishproofthus}{The proof of \cref{\loc} is thus complete.}
\NewDocumentEnvironment{asol} {} {%
\begin{proof}[Solution~to~\cref{\loc}]%
}{%
\noindent\finishsolthus
\end{proof}%
}
\newcommand{\finishsolthus}{The solution to \cref{\loc} is thus complete.}
%%%%%%%%%%%%%%%%%%%%%%%%
%%% Creating blank space with line breaks
%%%%%%%%%%%%%%%%%%%%%%%%
% Note that the code below is simply a method to
% create blank space that allows for proof environments to
% break across pages. This is something that TeX does not
% like by default, so this is my effort to bypass this.
\newcommand{\linebreaks}[1]{{\mbox{}\par
\newcount\foo
\foo=#1
\loop
\phantom{a}
\advance \foo -1
\ifnum \foo>0
\repeat
\mbox{}\par}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% %%
%% Beginning of actual text. %%
%% %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
\title{Some questions related to Iserles' textbook}
%
%
%
\author{Joshua Lee Padgett}
%
%
%
\date{\today}
%
%
%
\begin{document}
%
%
%
\maketitle
%
%
%
%\begin{abstract}
%
%\end{abstract}
%
%
%
\tableofcontents
%
%
%
\section{Euler's method and beyond}
%
%
%
The following questions are meant to help ensure you have a solid \emph{conceptual} understanding of the material from Chapter 1 of Iserles' textbook.
%
%
%
%
%
\bigskip
%
%
%
\begin{athm}{setting}{setting1}
%
Let $T \in (0,\infty)$, $d \in \N = \{1,2,3,\dots\}$,
let $\norm{\cdot} \colon \R^d \to [0,\infty)$ be a function which satisfies for all $u,v \in \R^d$, $s \in \R$ that $\norm{u+v} \le \norm{u}+\norm{v}$, $\norm{ su } = \abs{s} \norm{u}$, and $\norm{u} = 0$ if and only if $u = 0$,
let $\floor{\cdot}_h \colon [0,T] \to [0,T]$, $h \in (0,\infty)$, be the functions which satisfy for all $h \in (0,\infty)$, $t \in [0,T]$ that $\floor{t}_h = \max\pr[]{ [0,t] \cap \{0,h,2h,\dots\} }$,
let $f \colon \R^d \to \R^d$ be a function which satisfies that
%
\begin{equation}
\br*{ \sup_{v \in \R^d} \norm[\big]{ f(v) } } + \br*{ \sup_{v,w\in\R^d, v\neq w} \frac{ \norm{ f(v) - f(w) } }{ \norm{ v-w } } } < \infty \dc
\end{equation}
%
let $y \colon [0,T] \to \R^d$ be a measurable function which satisfies for all $t \in [0,T]$ that
%
\begin{equation}\label{def:y_prob}
y(t) = y(0) + \int_0^t f\pr[\big]{ y(s) } \dx s \dc
\end{equation}
%
and for every $h \in (0,\infty)$ let $Y_{0,h}, Y_{1,h} , \dots , Y_{\floor{T/h},h} \in \R^d$ satisfy for all $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}-1\}$ that $Y_{0,h} = y(0)$ and
%
\begin{equation}\label{def:euler}
Y_{n+1,h} = Y_{n,h} + h f \pr[\big]{ Y_{n,h} } \dpp
\end{equation}
%
\end{athm}
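%
%
%
The recursion in \cref{def:euler} translates directly into code.
The following is a minimal Python sketch (the drift $f$, the horizon $T$, the step size $h$, and the initial value below are placeholder choices for illustration, not part of \cref{setting1}):
%
\begin{verbatim}
import numpy as np

def euler(f, y0, T, h):
    """Explicit Euler: Y_{n+1} = Y_n + h f(Y_n) on the grid 0, h, 2h, ..."""
    N = int(np.floor(T / h))
    Y = np.empty((N + 1, np.size(y0)))
    Y[0] = y0
    for n in range(N):
        Y[n + 1] = Y[n] + h * f(Y[n])
    return Y

# Example: f(v) = sin(v) is bounded and globally Lipschitz, as required above.
Y = euler(lambda v: np.sin(v), np.array([1.0]), T=1.0, h=0.01)
\end{verbatim}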
%
%
%
\begin{athm}{problem}{prob1}
%
Do you understand \cref{setting1} above?
%
%
Do you understand what each individual component means and do you see why each component is necessary to present a well-defined numerical method (i.e., the method in \cref{def:euler})?
%
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{10}
\noindent
\end{aproof}
%
%
%
\begin{athm}{definition}{def:converge}
%
Assume \cref{setting1}.
%
%
We say that \cref{def:euler} is a convergent numerical method for \cref{def:y_prob} if and only if it holds that
%
\begin{equation}
\lim_{h \to 0^+} \br*{ \max_{n \in \{0,1,\dots,\floor{T/h} \} } \norm[\big]{ y(nh) - Y_{n,h} } } = 0 \dpp
\end{equation}
%
\end{athm}
%
%
%
\begin{athm}{problem}{prob2}
%
Do you understand conceptually what the notion of convergence is implying?
%
Can you see how the topology of the problem would come into play if we were not considering a problem posed in a finite-dimensional space?
%
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{10}
\noindent
\end{aproof}
%
%
%
\begin{athm}{lemma}{lem:gronwall}
%
Let $\alpha \in [0,\infty)$ and let $a_0,a_1,a_2,\ldots \in [0,\infty)$ and $b_0,b_1,b_2,\ldots \in [0,\infty)$ satisfy for all $n \in \N_0 = \N \cup \{0\}$ that
%
\begin{equation}\label{eq:gron1}
a_n \le \alpha + \sum_{k=0}^{n-1} b_k a_k \dpp
\end{equation}
%
Then it holds for all $n \in \N_0$ that
%
\begin{equation}\label{eq:gron2}
a_n \le \alpha \exp\pr*{ \sum_{k=0}^{n-1} b_k } \dpp
\end{equation}
\end{athm}
%
%
%
\begin{aproof}
First, we claim that for all $n \in \N_0$ it holds that
%
\begin{equation}\label{eq:induct_claim}
a_n \le \alpha \br*{ \prod_{k=0}^{n-1} \pr[\big]{ 1 + b_k } } \dpp
\end{equation}
%
We now prove \cref{eq:induct_claim} by mathematical induction on $n \in \N_0$.
%
%
For the base case $n=0$, note that \cref{eq:gron1} ensures that
%
\begin{equation}
a_0 \le \alpha + \sum_{k=0}^{-1} b_k a_k = \alpha + 0 = \alpha \dpp
\end{equation}
%
Combining this and the fact that $\prod_{k=0}^{-1} (1 + b_k) = 1$ establishes \cref{eq:induct_claim} in the base case $n=0$.
%
For the induction step $\N_0 \ni (n-1) \induct n \in \N$, let $n \in \N$ and assume that for every $m \in \{0,1,\dots,n-1\}$ it holds that
%
\begin{equation}\label{eq:induct_claim_intermediate}
a_m \le \alpha \br*{ \prod_{k=0}^{m-1} \pr[\big]{ 1 + b_k } } \dpp
\end{equation}
%
This and \cref{eq:gron1} assure that
%
\begin{equation}
a_n
\le
\alpha + \sum_{k=0}^{n-1} b_k a_k
\le
\alpha + \sum_{k=0}^{n-1} b_k \pr*{ \alpha \br*{ \prod_{j=0}^{k-1} \pr[\big]{ 1 + b_j } } }
=
\alpha \pr*{ 1 + \sum_{k=0}^{n-1} \br*{ \prod_{j=0}^{k-1} \pr[\big]{ 1 + b_j } } b_k }
\dpp
\end{equation}
%
Next, observe that
%
\begin{equation}\label{eq:gron3}
\begin{split}
1 + \sum_{k=0}^{n-1} \br*{ \prod_{j=0}^{k-1} \pr[\big]{ 1 + b_j } } b_k
& =
1 + \sum_{k=0}^{n-1} \br*{ \prod_{j=0}^{k-1} \pr[\big]{ 1 + b_j } } \pr[\big]{ (1 + b_k) - 1 }
\\
& =
1 + \sum_{k=0}^{n-1} \br*{ \prod_{j=0}^{k} \pr[\big]{ 1 + b_j } - \prod_{j=0}^{k-1} \pr[\big]{ 1 + b_j } }
\\
& =
1 + \prod_{j=0}^{n-1} \pr[\big]{ 1 + b_j } - \prod_{j=0}^{-1} \pr[\big]{ 1 + b_j }
=
\prod_{j=0}^{n-1} \pr[\big]{ 1 + b_j }
\dpp
\end{split}
\end{equation}
%
Combining this with the preceding estimate establishes \cref{eq:induct_claim} in the induction step. Mathematical induction therefore proves \cref{eq:induct_claim}.
%
Moreover, note that the fact that for all $x \in [0,\infty)$ it holds that $1 + x \le \exp(x)$, the assumption that $b_0,b_1,b_2,\ldots \in [0,\infty)$, and \cref{eq:induct_claim} imply that for all $n \in \N_0$ it holds that
%
\begin{equation}
a_n \le \alpha \br*{ \prod_{k=0}^{n-1} \pr[\big]{ 1 + b_k } }
\le \alpha \br*{ \prod_{k=0}^{n-1} \exp(b_k) }
= \alpha \exp\pr*{ \sum_{k=0}^{n-1} b_k } \dpp
\end{equation}
%
This establishes \cref{eq:gron2}.
%
\end{aproof}
%
%
%
\begin{athm}{problem}{prob3}
%
Assume \cref{setting1}.
%
Using \cref{lem:gronwall} above, prove that there exists $C \in [0,\infty)$ such that for all $h \in (0,\infty)$
%%%, $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}\}$
it holds that
%
\begin{equation}\label{eq:converge}
\max_{n \in \{0,1,\dots,\floor{T/h}\} } \norm[\big]{ y(nh) - Y_{n,h} } \le C h \dpp
\end{equation}
%
Explain how proving \cref{eq:converge} holds would relate to the notion of convergence (cf.\ \cref{def:converge}).
%
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{20}
\noindent
\end{aproof}
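%
%
%
Although no substitute for the proof requested in \cref{prob3}, the rate in \cref{eq:converge} can be observed numerically.
A minimal sketch (the drift, horizon, and initial value are again placeholder choices, and the exact solution is replaced by a fine-grid reference approximation):
%
\begin{verbatim}
import numpy as np

def euler_endpoint(h, T=1.0, y0=1.0):
    """Explicit Euler approximation of y(T) for y' = sin(y), y(0) = y0."""
    Y = y0
    for _ in range(int(np.floor(T / h))):
        Y = Y + h * np.sin(Y)
    return Y

ref = euler_endpoint(2.0 ** -16)      # fine-grid stand-in for the exact y(T)
for k in range(4, 9):
    h = 2.0 ** -k
    err = abs(euler_endpoint(h) - ref)
    print(f"h = {h:.5f}   error = {err:.3e}   error/h = {err / h:.4f}")
# The ratio error/h approaches a constant, consistent with the O(h) bound.
\end{verbatim}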
%
%
%
\begin{athm}{problem}{prob4}
%
Can you present the theta method from the textbook in the rigorous format used in \cref{setting1} above?
%
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{15}
\noindent
\end{aproof}
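%
%
%
For orientation when attempting \cref{prob4}, the following is a sketch of how the theta method could be implemented, using the convention $Y_{n+1,h} = Y_{n,h} + h \br[]{ \theta f( Y_{n,h} ) + (1-\theta) f( Y_{n+1,h} ) }$ (so that $\theta = 1$ recovers \cref{def:euler}); resolving the implicit stage by fixed-point iteration is one possible choice and is only justified for sufficiently small $h$:
%
\begin{verbatim}
import numpy as np

def theta_method(f, y0, T, h, theta, fp_iters=50):
    """Theta method: Y_{n+1} = Y_n + h*(theta*f(Y_n) + (1-theta)*f(Y_{n+1})).
    The implicit equation is solved by fixed-point iteration, which is a
    contraction whenever h*(1-theta)*Lip(f) < 1."""
    N = int(np.floor(T / h))
    Y = np.empty((N + 1, np.size(y0)))
    Y[0] = y0
    for n in range(N):
        rhs = Y[n] + h * theta * f(Y[n])
        Z = Y[n].copy()
        for _ in range(fp_iters):
            Z = rhs + h * (1 - theta) * f(Z)
        Y[n + 1] = Z
    return Y
\end{verbatim}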
%
%
%
\newcommand{\T}{T_{\text{new}}}
\begin{athm}{setting}{setting2}
%
Let $\T , p \in (0,\infty)$, $d \in \N$,
let $\norm{\cdot} \colon \R^d \to [0,\infty)$ be a function which satisfies for all $u,v \in \R^d$, $s \in \R$ that $\norm{u+v} \le \norm{u}+\norm{v}$, $\norm{ su } = \abs{s} \norm{u}$, and $\norm{u} = 0$ if and only if $u = 0$,
let $\floor{\cdot}_h \colon [0,\T] \to [0,\T]$, $h \in (0,\infty)$, be the functions which satisfy for all $h \in (0,\infty)$, $t \in [0,\T]$ that $\floor{t}_h = \max\pr[]{ [0,t] \cap \{0,h,2h,\dots\} }$,
let $g \colon \R^d \to \R^d$ be a function which satisfies that
%
\begin{equation}
%%%\br*{ \sup_{v \in \R^d} \norm[\big]{ f(v) } } +
%%%\br*{
\sup_{v,w\in\R^d, v\neq w} \frac{ \norm{ g(v) - g(w) } }{ \pr[\big]{ 1 + \norm{v}^p + \norm{w}^p }\norm{ v-w } }
%%%}
< \infty \dc
\end{equation}
%
let $z \colon [0,\T] \to \R^d$ be a measurable function which satisfies for all $t \in [0,\T]$ that
%
\begin{equation}\label{def:z_prob}
z(t) = z(0) + \int_0^t g\pr[\big]{ z(s) } \dx s \dc
\end{equation}
%
and for every $h \in (0,\infty)$ let $Z_{0,h}, Z_{1,h} , \dots , Z_{\floor{\T/h},h} \in \R^d$ satisfy for all $n \in \{0,1,\dots,\allowbreak\floor{\nicefrac{\T}{h}}-1\}$ that $Z_{0,h} = z(0)$ and
%
\begin{equation}\label{def:euler_new}
Z_{n+1,h} = Z_{n,h} + h g \pr[\big]{ Z_{n,h} } \dpp
\end{equation}
%
\end{athm}
%
%
%
\begin{athm}{problem}{prob5}
%
Can we prove a result similar to that in \cref{prob3} under the assumptions outlined in \cref{setting2} above?
%
If not, can we prove a result that is ``similar'' to the result in \cref{prob3}?
%
What additional assumptions (if any) are needed to prove either of the above results?
%
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{25}
\noindent
\end{aproof}
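%
%
%
Not an answer to \cref{prob5}, but an experiment that may inform intuition: for the drift $g(v) = -v^3$, which satisfies the growth condition in \cref{setting2} with $p = 2$ but is not globally Lipschitz, the explicit Euler recursion \cref{def:euler_new} can blow up for a fixed step size and large initial data even though the exact solution decays to zero (the step size and initial value below are placeholder choices):
%
\begin{verbatim}
h, z = 0.1, 10.0            # fixed step size, (large) initial value
for n in range(5):
    print(n, z)
    z = z + h * (-z ** 3)   # explicit Euler for z' = -z^3
# Output: 10 -> -90 -> 72810 -> ... (rapid blow-up), although the
# exact solution z(t) = z(0)/sqrt(1 + 2*t*z(0)^2) decays to zero.
\end{verbatim}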
%
%
%
\subsection{An exploration of the linear case}
%
%
%
\begin{athm}{definition}{def:mat_exp}
%
We denote by
$ \exp \colon (\medcup{d \in \N} \C^{d\times d}) \to (\medcup{d \in \N} \C^{d\times d})$
the function which satisfies for all
$d \in \N$,
$A \in \C^{d \times d}$
that
%
$
\exp(A) = \sum_{k=0}^\infty (\nicefrac{1}{k!}) A^k
$.
%
\end{athm}
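%
%
%
A naive way to evaluate \cref{def:mat_exp} numerically is to truncate the series.
The sketch below compares the partial sums against \texttt{scipy.linalg.expm}; truncation is adequate for matrices of moderate norm, but it is not how robust implementations work:
%
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

def exp_series(A, K=30):
    """Partial sum sum_{k=0}^{K} A^k / k! of the exponential series."""
    S = np.eye(A.shape[0])
    term = np.eye(A.shape[0])
    for k in range(1, K + 1):
        term = term @ A / k        # term = A^k / k!
        S = S + term
    return S

A = np.array([[-1.0, 1.0], [-2.0, -4.0]])
print(np.linalg.norm(exp_series(A) - expm(A)))   # roughly machine precision
\end{verbatim}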
%
%
%
\begin{athm}{definition}{def:mat_det}
%
\newcommand{\ppp}{\mathfrak{p}}
\newcommand{\nnn}{\mathfrak{N}}
%
For every
$ d \in \N $
let
$ \nnn_d = \{1,2,\dots,d\} $,
for every
$ d \in \N $
let
$S_d = \{ (\sigma \colon \nnn_d \to \nnn_d ) \colon \text{$\sigma$ is a bijection} \}$,
let
$ \ppp \colon (\medcup{d\in\N}S_d) \to \N_0 $
be the function which satisfies for all
$ d \in \N $,
$ \sigma \in S_d $
that
$ \ppp(\sigma) = \sum_{i=1}^d \sum_{j=i+1}^d \mathbbm{1}_{(0,\infty)} (\sigma_i - \sigma_j) $,
and let
$ \operatorname{sgn} \colon (\medcup{d\in\N}S_d) \to \{-1,1\} $
be the function which satisfies for all
$ d \in \N $,
$ \sigma \in S_d $
that
$ \operatorname{sgn}(\sigma) = (-1)^{\ppp(\sigma)} $.
%
%
Then we denote by
$\det \colon (\medcup{d\in\N}\R^{d\times d}) \to \R$
the function which satisfies for all
$ d \in \N $,
$A = (a_{i,j})_{i,j \in \{1,2,\dots,d\}} \in \R^{d \times d}$
that
$ \det(A) = \sum_{\sigma \in S_d} \br[]{ \operatorname{sgn}(\sigma) \prod_{i=1}^d a_{i,\sigma_i} } $.
%
\end{athm}
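%
%
%
Read literally, \cref{def:mat_det} is directly computable, albeit in $O(d! \, d^2)$ time, so only for tiny $d$.
A transcription into Python, with \texttt{itertools.permutations} enumerating $S_d$ and the sign obtained by counting inversions exactly as in the definition of $\mathfrak{p}$:
%
\begin{verbatim}
import numpy as np
from itertools import permutations

def det_leibniz(A):
    """Leibniz formula: det(A) = sum over sigma of sgn(sigma)*prod_i a_{i,sigma(i)}."""
    d = A.shape[0]
    total = 0.0
    for sigma in permutations(range(d)):
        inversions = sum(1 for i in range(d) for j in range(i + 1, d)
                         if sigma[i] > sigma[j])        # this is p(sigma)
        total += (-1) ** inversions * np.prod([A[i, sigma[i]] for i in range(d)])
    return total

A = np.array([[-1.0, 1.0], [-2.0, -4.0]])
print(det_leibniz(A), np.linalg.det(A))   # both equal 6.0
\end{verbatim}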
%
%
%
\begin{athm}{definition}{def:mat_trace}
%
We denote by
$ \operatorname{tr} \colon (\medcup{d\in\N}\R^{d\times d}) \to \R $
the function which satisfies for all
$ d\in \N $,
$A = (a_{i,j})_{i,j \in \{1,2,\dots,d\}} \in \R^{d \times d}$
that
$ \operatorname{tr}(A) = \sum_{i=1}^d a_{i,i} $.
%
\end{athm}
%
%
%
\begin{athm}{lemma}{lem:mat_exp1}
%
Let
$d \in \N$,
$A,B \in \R^{d\times d}$
and
let $\norm{\cdot} \colon \R^{d\times d} \to [0,\infty)$ be a function which satisfies for all $U,V \in \R^{d\times d}$, $s \in \R$ that $\norm{U+V} \le \norm{U}+\norm{V}$, $\norm{ sU } = \abs{s} \norm{U}$, $\norm{UV} \le \norm{U} \norm{V}$, and $\norm{U} = 0$ if and only if $U = 0$.
%
Then
%
\begin{enumerate}[label=(\roman*)]
%
\item
it holds that
$ \norm{ \exp(A) } \le \exp( \norm{A} ) < \infty$,
%
\item
it holds for all
$ s,t \in \R $
that
$ \exp( sA + tA ) = \exp(sA) \exp(tA) $,
%
\item
it holds that
$ \exp(A) \exp(-A) = \operatorname{id}_{\R^{d\times d}} $,
%
\item
it holds that
$ \exp( A + B ) = \exp(A) \exp(B) $
if it holds that
$ AB = BA $
(the converse implication fails in general),
and
%
\item
it holds that
$ \det(\exp(A)) = \exp(\operatorname{tr}(A)) $
%
\end{enumerate}
%
(cf.\ \cref{def:mat_exp,def:mat_det,def:mat_trace}).
%
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{25}
\noindent
\end{aproof}
%
%
%
\begin{athm}{problem}{prob:mat_exp1}
%
Let
$ A \in \R^{2\times 2}$
satisfy
%
\begin{equation}
A =
\begin{pmatrix}
-1 & 1 \\ -2 & -4
\end{pmatrix}
\dpp
\end{equation}
%
\begin{enumerate}[label=(\roman*)]
%
\item \label{prob:mat_exp1_item1}
Show that there exist
$D = (d_{i,j})_{i,j\in\{1,2\}} \in \R^{2\times 2}$,
$P \in \R^{2\times 2}$
with
$ \det(P) \neq 0 $,
$ d_{1,2} = d_{2,1} = 0 $,
and
$ A = PDP^{-1} $
(cf.\ \cref{def:mat_det}).
%
\item
Use the results from \cref{prob:mat_exp1_item1} to show that
%
\begin{equation}
\exp(A)
=
\begin{pmatrix}
2\exp(-2) - \exp(-3) & \exp(-2) - \exp(-3) \\
2\exp(-3) - 2\exp(-2) & 2\exp(-3) - \exp(-2)
\end{pmatrix}
\end{equation}
%
(cf.\ \cref{def:mat_exp}).
%
\end{enumerate}
%
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{25}
\noindent
\end{aproof}
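%
%
%
A numerical cross-check of the claims in \cref{prob:mat_exp1} (again, no substitute for a proof): \texttt{numpy.linalg.eig} furnishes candidates for $P$ and $D$, and the stated formula for $\exp(A)$ can be compared entrywise with \texttt{scipy.linalg.expm}:
%
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

A = np.array([[-1.0, 1.0], [-2.0, -4.0]])
evals, P = np.linalg.eig(A)     # eigenvalues -2, -3; columns of P eigenvectors
D = np.diag(evals)
print(np.linalg.norm(A - P @ D @ np.linalg.inv(P)))   # ~ 0, so A = P D P^{-1}

e2, e3 = np.exp(-2.0), np.exp(-3.0)
claimed = np.array([[2 * e2 - e3, e2 - e3],
                    [2 * e3 - 2 * e2, 2 * e3 - e2]])
print(np.linalg.norm(expm(A) - claimed))   # ~ 0, matching the stated formula
\end{verbatim}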
%
%
%
\begin{athm}{problem}{prob:mat_exp2}
%
Let
$ T \in (0,\infty) $,
%
let $\norm{\cdot} \colon \R^2 \to [0,\infty)$ be the function which satisfies for all
$ u = (u_1,u_2) \in \R^2 $
that
$\norm{u} = [\abs{u_1}^2 + \abs{u_2}^2]^{\nicefrac{1}{2}}$,
%
let $\floor{\cdot}_h \colon [0,T] \to [0,T]$, $h \in (0,\infty)$, be the functions which satisfy for all $h \in (0,\infty)$, $t \in [0,T]$ that $\floor{t}_h = \max\pr[]{ [0,t] \cap \{0,h,2h,\dots\} }$,
%
let
$ A \in \R^{2\times 2}$,
$ y \in C([0,T],\R^2) $
satisfy for all
$ t \in [0,T] $
that
%%%$ y(0) = (1,1) \in \R^2 $
%%%and
%
\begin{equation}
A =
\begin{pmatrix}
-1 & 1 \\ -2 & -4
\end{pmatrix}
\qquad
\text{and}
\qquad
y(t) = (1,1)^* + \int_0^t A y(s) \dx s \dc
\end{equation}
%
and for every $h \in (0,\infty)$ let $Y_{0,h}, Y_{1,h} , \dots , Y_{\floor{T/h},h} \in \R^2$ satisfy for all $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}-1\}$ that $Y_{0,h} = y(0)$ and
%
\begin{equation}
Y_{n+1,h} = Y_{n,h} + h A Y_{n,h} \dpp
\end{equation}
%
\begin{enumerate}[label=(\roman*)]
%
\item
Prove that for all
$ t \in [0,T] $
it holds that
$ y(t) = \exp(tA)y(0) $
(cf.\ \cref{def:mat_exp}).
%
\item
Prove that for all
$h \in (0,\infty)$,
$n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}\}$
it holds that
%
\begin{equation}
Y_{n,h} = \pr[\big]{ \operatorname{id}_{\R^{2\times 2}} + hA }^{\!n} y(0)
\dpp
\end{equation}
%
\item
Prove that for all
$ h \in (0,\infty) $
it holds that
%
\begin{equation}
\begin{split}
&
\norm[\Big]{ \exp(hA)y(0) - \pr[\big]{ \operatorname{id}_{\R^{2\times 2}} + hA } y(0) }
=
\norm*{ \int_0^h (h-s) A^2 \exp\pr[\big]{ sA } y(0) \dx s }
\\
&
\qquad
\le
\frac{h^2}{2} \br*{ \sup_{\mathfrak{h} \in (0,h)} \pr*{ \sup_{v \in \R^2 \backslash \{0\} } \frac{ \norm{ \exp(\mathfrak{h} A) v } }{ \norm{v} } } } \norm[\big]{ A^2 y(0) }
\le
3\sqrt{17} \, h^2
\end{split}
\end{equation}
%
(cf.\ \cref{def:mat_exp}).
%
\item
Prove that for all
$h \in (0,\infty)$,
$n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}\}$
it holds that
%
\begin{equation}
\begin{split}
&
y(nh) - Y_{n,h}
\\
&
\quad
=
\sum_{k=0}^{n-1} \exp\pr[\big]{ khA } \br[\Big]{ \exp\pr[\big]{ hA } - \pr[\big]{ \operatorname{id}_{\R^{2\times 2}} + hA } } \pr[\big]{ \operatorname{id}_{\R^{2\times 2}} + hA }^{(n-k-1)} y(0)
\end{split}
\end{equation}
%
(cf.\ \cref{def:mat_exp}).
%
\item
Prove that for all
$ h \in (0,\infty) $
it holds that
%
\begin{equation}
\max_{ n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}\} } \norm[\big]{ y(nh) - Y_{n,h} } \le 3\sqrt{17} \, T \exp\pr[\big]{ \nicefrac{9T}{2} } h
\dpp
\end{equation}
%
\end{enumerate}
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{25}
\noindent
\end{aproof}
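%
%
%
The estimates in \cref{prob:mat_exp2} can also be probed empirically.
The sketch below tabulates the maximal error over the grid for a few step sizes (the choice $T = 1$ is a placeholder); the ratio to $h$ should remain bounded, in line with the final estimate of the problem:
%
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

A = np.array([[-1.0, 1.0], [-2.0, -4.0]])
y0, T = np.array([1.0, 1.0]), 1.0

for k in range(2, 7):
    h = T / 2 ** k
    N = int(np.floor(T / h))
    Y, I, err = y0.copy(), np.eye(2), 0.0
    for n in range(N + 1):
        err = max(err, np.linalg.norm(expm(n * h * A) @ y0 - Y))
        Y = (I + h * A) @ Y       # Y_{n+1} = (id + hA) Y_n
    print(f"h = {h:.4f}   max error = {err:.3e}   ratio to h = {err / h:.4f}")
\end{verbatim}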
%
%
%
\section{Multistep methods}
%
%
%
\newcommand{\U}{\mathbb{U}}
\newcommand{\A}{\mathcal{A}}
\newcommand{\D}{\mathcal{D}}
\newcommand{\E}{\mathcal{E}}
\begin{athm}{setting}{setting_2_1}
%
Let $T \in (0,\infty)$,
$d,s \in \N$,
$a_0,a_1,\dots,a_s \in \R$,
$b_0,b_1,\dots,b_s \in \R$,
let $\norm{\cdot} \colon \R^d \to [0,\infty)$ be a function which satisfies for all $u,v \in \R^d$, $s \in \R$ that $\norm{u+v} \le \norm{u}+\norm{v}$, $\norm{ su } = \abs{s} \norm{u}$, and $\norm{u} = 0$ if and only if $u = 0$,
let $\floor{\cdot}_h \colon [0,T] \to [0,T]$, $h \in (0,\infty)$, be the functions which satisfy for all $h \in (0,\infty)$, $t \in [0,T]$ that $\floor{t}_h = \max\pr[]{ [0,t] \cap \{0,h,2h,\dots\} }$,
let $\A = \cu[]{ g \colon [0,T] \to \R^d \colon \text{$g$ is analytic on $[0,T]$} }$,
let $\D \colon \A \to \A$ and $\E_h \colon \A \to \A$, $h \in (0,\infty)$, satisfy for all $h \in (0,\infty)$, $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}-1\}$, $g \in \A$ that
%
\begin{equation}
\pr[\big]{ \D g } \lr (nh) = \pr[\big]{ \tfrac{\dd}{\dd t} g } \lr (nh)
\qquad \text{and} \qquad
\pr[\big]{ \E_h g } \lr (nh) = g\pr[\big]{ (n+1)h } \dc
\end{equation}
%
let $f \colon \R^d \to \R^d$ be a function which satisfies that
%
\begin{equation}
\br*{ \sup_{v \in \R^d} \norm[\big]{ f(v) } } + \br*{ \sup_{v,w\in\R^d, v\neq w} \frac{ \norm{ f(v) - f(w) } }{ \norm{ v-w } } } < \infty \dc
\end{equation}
%
let $y \colon [0,T] \to \R^d$ be a measurable function which satisfies for all $t \in [0,T]$ that
%
\begin{equation}\label{eq:ode_multi}
y(t) = y(0) + \int_0^t f\pr[\big]{ y(s) } \dx s \dc
\end{equation}
%
and for every $h \in (0,\infty)$ let $Y_{0,h}, Y_{1,h} , \dots , Y_{\floor{T/h},h} \in \R^d$ satisfy for all $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}-s\}$ that $Y_{0,h} = y(0)$ and
%
\begin{equation}\label{def:multi}
\sum_{m=0}^s a_m Y_{n+m,h} = h \sum_{m=0}^s b_m f \pr[\big]{ Y_{n+m,h} } \dpp
\end{equation}
%
\end{athm}
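%
%
%
To make \cref{def:multi} concrete: the two-step Adams--Bashforth method corresponds to the choice $s = 2$, $a_0 = 0$, $a_1 = -1$, $a_2 = 1$, $b_0 = -\nicefrac{1}{2}$, $b_1 = \nicefrac{3}{2}$, $b_2 = 0$.
Since $b_s = 0$ the scheme is explicit, and a Python sketch (with the additional starting value generated by one Euler step, which is one common choice and is not prescribed by the setting) reads:
%
\begin{verbatim}
import numpy as np

def adams_bashforth2(f, y0, T, h):
    """AB2: Y_{n+2} = Y_{n+1} + h*(3/2*f(Y_{n+1}) - 1/2*f(Y_n)).
    Assumes floor(T/h) >= 1; Y_1 is produced by one explicit Euler step."""
    N = int(np.floor(T / h))
    Y = np.empty((N + 1, np.size(y0)))
    Y[0] = y0
    Y[1] = Y[0] + h * f(Y[0])
    for n in range(N - 1):
        Y[n + 2] = Y[n + 1] + h * (1.5 * f(Y[n + 1]) - 0.5 * f(Y[n]))
    return Y
\end{verbatim}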
%
%
%
\begin{athm}{definition}{def:order}
%
Assume \cref{setting_2_1}.
%
We say that \cref{def:multi} is a numerical method of order $p \in \N_0$ if and only if there exist $C , h_0 \in (0,\infty)$ such that for all $h \in (0,h_0)$, $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}-s\}$ it holds that
%
\begin{equation}
\norm[\Big]{ \textstyle \sum_{m=0}^s a_m y\pr[\big]{ (n+m)h } - h \sum_{m=0}^s b_m f\pr[\big]{ y\pr[\big]{ (n+m)h } } } \le C h^{p+1} \dpp
\end{equation}
%
\end{athm}
%
%
%
\begin{athm}{lemma}{lemma:multi}
%
Assume \cref{setting_2_1} and let $p \in \N$.
%
Then \cref{def:multi} is of order $p$ if and only if there exist $C , \varepsilon \in (0,\infty)$ such that for all $z \in \R$
with $\abs{z - 1} < \varepsilon$
it holds that
%
\begin{equation}\label{eq:lemma:multi}
\abs[\big]{ \textstyle\sum_{m=0}^s a_m z^m - \ln(z) \sum_{m=0}^s b_m z^m } \le C \abs{ z-1 }^{p+1}
\end{equation}
%
(cf.\ \cref{def:order}).
%
\end{athm}
%
%
%
\begin{aproof}
%
Throughout this proof let $h \in (0,\infty)$ be sufficiently small,
let $\rho \colon \R \to \R$ and $\sigma \colon \R \to \R$ be the functions which satisfy for all $z \in \R$ that
%
\begin{equation}
\rho(z) = \sum_{m=0}^s a_m z^m
\qquad \text{and} \qquad
\sigma(z) = \sum_{m=0}^s b_m z^m \dc
\end{equation}
and
without loss of generality assume that $y \in \A$.
%%% and that its radius of convergence is \emph{at least} $sh$.
%
Note that Taylor's theorem guarantees that for all $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}\}$, $k \in \N_0$ it holds that
%
\begin{equation}
\begin{split}
\pr[\Big]{ \E_h \pr[\big]{ \tfrac{\dd^k}{\dd t^k} y } } \lr (nh)
=
\pr[\big]{ \tfrac{\dd^k}{\dd t^k} y } \lr \pr[\big]{ (n+1)h }
& =
\sum_{j=0}^\infty \frac{h^j}{j!} \pr[\big]{ \tfrac{\dd^{k+j}}{\dd t^{k+j}} y } \lr \pr[]{ nh }
\\
& =
\sum_{j=0}^\infty \frac{h^j}{j!} \pr[\Big]{ \tfrac{\dd^j}{\dd t^j} \pr[\big]{ \tfrac{\dd^{k}}{\dd t^{k}} y } } \lr \pr[]{ nh }
\\
& =
\sum_{j=0}^\infty \frac{h^j}{j!} \pr[\Big]{ \D^j \pr[\big]{ \tfrac{\dd^{k}}{\dd t^{k}} y } } \lr \pr[]{ nh }
\dpp
\end{split}
\end{equation}
%
Combining this and the fact that $\D$ is a bounded linear operator (something we have not shown, but which can be shown) ensures that
%
\begin{equation}\label{eq:shift_rep}
\E_h = \exp\pr[\big]{ h \D } \dpp
\end{equation}
%
Next, observe that \cref{eq:ode_multi} assures that for all $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}-s\}$ it holds that
%
\begin{equation}
\begin{split}
&
\sum_{m=0}^s a_m y\pr[\big]{ (n+m)h } - h \sum_{m=0}^s b_m f\pr[\big]{ y( (n+m)h ) }
\\
& \quad =
\sum_{m=0}^s a_m y\pr[\big]{ (n+m)h } - h \sum_{m=0}^s b_m \pr[\big]{ \tfrac{\dd}{\dd t} y } \lr \pr[\big]{ (n+m)h }
\\
& \quad =
\sum_{m=0}^s a_m \pr[\big]{ \E_h^m y } \lr (nh) - h \sum_{m=0}^s b_m \pr[\Big]{ \E_h^m \pr[\big]{ \D y } } \lr (nh)
\dpp
\end{split}
\end{equation}
%
This, the fact that \cref{eq:shift_rep} implies that for all $g \in \A$ it holds that $(\D(\E_h g)) = (\E_h(\D g))$, the fact that $\D$ is a linear operator, and the Riesz--Dunford (holomorphic) functional calculus guarantee that for all $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}-s\}$ it holds that
%
\begin{equation}
\begin{split}
&
\sum_{m=0}^s a_m y\pr[\big]{ (n+m)h } - h \sum_{m=0}^s b_m f\pr[\big]{ y( (n+m)h ) }
\\
& \quad =
\sum_{m=0}^s a_m \pr[\big]{ \E_h^m y } \lr (nh) - h \pr*{ \D \sum_{m=0}^s b_m \pr[\big]{ \E_h^m y } } \lr (nh)
\\
& \quad =
\pr[\Bigg]{ \pr[\bigg]{ \sum_{m=0}^s a_m \E_h^m - h \D \sum_{m=0}^s b_m \E_h^m } y } \lr (nh)
=
\pr[\Big]{ \pr[\big]{ \rho(\E_h) - h \D \sigma(\E_h) } y } \lr (nh)
\dpp
\end{split}
\end{equation}
%
This shows that for all $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}-s\}$ it holds that
%
\begin{align}\label{eq:2_15}
&
\norm*{ \sum_{m=0}^s a_m y\pr[\big]{ (n+m)h } - h \sum_{m=0}^s b_m f\pr[\big]{ y( (n+m)h ) } }
\\
& \quad =
\norm*{ \pr[\Big]{ \pr[\big]{ \rho(\E_h) - h \D \sigma(\E_h) } y } \lr (nh) }
\le
\br*{ \sup_{g \in \A \backslash \{0\} } \frac{ \norm*{ \pr[\big]{ \pr[]{ \rho(\E_h) - h \D \sigma(\E_h) } g } \lr (nh) } }{ \norm[\big]{g(nh)} } } \norm[\big]{ y(nh) }
\dpp \nonumber
\end{align}
%
In addition, note that \cref{eq:shift_rep}, the fact that for all $g \in \A$, $t \in [0,T]$ it holds that $\lim_{z\to 0^+} (\E_z g)(t) = g(t)$ (can you see that this is true?), and the series representation $\ln(x) = \sum_{k=0}^\infty \frac{(-1)^{k}}{k+1} (x-1)^{k+1}$ of the logarithm demonstrate that for all $g \in \A$, $t \in [0,T]$ it holds that
%
\begin{equation}
\pr[\big]{ h\D g } \lr (t) = \pr[\big]{ \ln(\E_h) g } \lr (t) = \pr*{ \sum_{k=0}^\infty \frac{(-1)^{k}}{k+1} \pr[\big]{ \E_h - \operatorname{id} }^{k+1} g } \lr (t) \dpp
\end{equation}
%
This and the Riesz--Dunford (holomorphic) functional calculus
%%%, and the fact that the spectrum of $\E_h$ is contained in the set $[-h,h] \subseteq \R$ (this is something we can prove if we know a little functional analysis)
yield that
there exists a contour $\gamma_h \subseteq \C$ (with the spectrum of $\E_h$ contained inside of $\gamma_h$---we can discuss this, if desired) such that
for all $g \in \A$, $t \in [0,T]$ it holds that
%
\begin{equation}\label{eq:2_17}
\pr[\Big]{ \pr[\big]{ \rho(\E_h) - \ln(\E_h) \sigma(\E_h) } g } \lr (t)
=
\frac{1}{2\pi i} \int_{\gamma_h} \br[\big]{ \rho(z) - \ln(z) \sigma(z) } \pr[\big]{ (z\operatorname{id} - \E_h)^{-1} g } \lr (t) \dx z
\dpp
\end{equation}
%
Combining \cref{eq:2_15,eq:2_17} hence proves \cref{eq:lemma:multi}.
%
\end{aproof}
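%
%
%
In practice \cref{lemma:multi} yields a mechanical order test: expand $\rho(z) - \ln(z)\sigma(z)$ around $z = 1$ and read off the first nonvanishing power of $z - 1$.
A sympy sketch for the two-step Adams--Bashforth coefficients listed after \cref{setting_2_1}:
%
\begin{verbatim}
import sympy as sp

z = sp.symbols('z')
rho = -z + z ** 2                                   # a_0 = 0, a_1 = -1, a_2 = 1
sigma = sp.Rational(-1, 2) + sp.Rational(3, 2) * z  # b_0 = -1/2, b_1 = 3/2
print(sp.series(rho - sp.log(z) * sigma, z, 1, 5))
# Leading term 5*(z - 1)**3/12, i.e., p + 1 = 3, so the method has order p = 2.
\end{verbatim}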
%
%
%
\section{Runge--Kutta methods}
%
%
%
%
%
%
\section{Stiff equations}
%
%
%
\begin{athm}{definition}{def:stable}
Let
$y_\lambda \colon [0,\infty) \to \C$, $\lambda \in \C$, be measurable functions which satisfy for all $\lambda \in \C$, $t \in [0,\infty)$ that
%
\begin{equation}
y_\lambda(t) = 1 + \lambda \int_0^t y_\lambda(s) \dx s \dc
\end{equation}
%
let $h \in (0,\infty)$,
for every $\lambda \in \C$ let
$Y_{0,\lambda} , Y_{1,\lambda} , Y_{2,\lambda}, \ldots \in \C$ satisfy
$Y_{0,\lambda} = 1$,
and
assume that there exist
$p,C \in (0,\infty)$
such that for all $\lambda \in \C$ with $\lambda + \bar{\lambda} \in (-\infty,0)$ it holds that
%
\begin{equation}
\sup_{n \in \N_0} \abs[\big]{ y_\lambda(nh) - Y_{n,\lambda} } \le C h^p \dpp
\end{equation}
%
%
Then the set
%
\begin{equation}
\mathcal{D} = \cu[\big]{ h\lambda \in \C \colon \textstyle\lim_{n\to\infty} Y_{n,\lambda} = 0 } \subseteq \C
\end{equation}
%
is the \emph{linear stability domain} of the numerical method
$\{Y_{n,\lambda}\}_{(n,\lambda) \in \N_0 \times \C}$.
%
%
Moreover, we say that the numerical method
$\{Y_{n,\lambda}\}_{(n,\lambda) \in \N_0 \times \C}$
is A-stable if it holds that
%
\begin{equation}
\cu[\big]{ z \in \C \colon z + \bar{z} \in (-\infty,0) } \subseteq \mathcal{D} \dpp
\end{equation}
\end{athm}
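%
%
%
For a one-step method satisfying $Y_{n+1,\lambda} = r(h\lambda) \, Y_{n,\lambda}$ with a rational stability function $r$, membership of $h\lambda$ in $\mathcal{D}$ reduces to $\abs{ r(h\lambda) } < 1$, which is easy to test pointwise.
A sketch for the explicit Euler method, for which $r(z) = 1 + z$:
%
\begin{verbatim}
def in_domain(z, r=lambda z: 1 + z):
    """Test |r(z)| < 1, i.e., whether z = h*lambda lies in the linear
    stability domain of a one-step method with stability function r."""
    return abs(r(z)) < 1

print(in_domain(-1.0))         # True: inside the disc |1 + z| < 1
print(in_domain(-0.1 + 2.0j))  # False although Re(z) < 0: Euler is not A-stable
\end{verbatim}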
%
%
%
\begin{athm}{problem}{problem_stiff}
%
Let $T \in (0,\infty)$, $d \in \N$,
let $\norm{\cdot} \colon \R^d \to [0,\infty)$ be a function which satisfies for all $u,v \in \R^d$, $s \in \R$ that $\norm{u+v} \le \norm{u}+\norm{v}$, $\norm{ su } = \abs{s} \norm{u}$, and $\norm{u} = 0$ if and only if $u = 0$,
let $\floor{\cdot}_h \colon [0,T] \to [0,T]$, $h \in (0,\infty)$, be the functions which satisfy for all $h \in (0,\infty)$, $t \in [0,T]$ that $\floor{t}_h = \max\pr[]{ [0,t] \cap \{0,h,2h,\dots\} }$,
let $f \in C^1(\R^d,\R^d)$ satisfy
%
\begin{equation}
\br*{ \sup_{v \in \R^d} \norm[\big]{ f(v) } } + \br*{ \sup_{v,w\in\R^d, v\neq w} \frac{ \norm{ f(v) - f(w) } }{ \norm{ v-w } } } < \infty \dc
\end{equation}
%
let $y \colon [0,T] \to \R^d$ be a measurable function which satisfies for all $t \in [0,T]$ that
%
\begin{equation}
y(t) = y(0) + \int_0^t f\pr[\big]{ y(s) } \dx s \dc
\end{equation}
%
and for every $h \in (0,\infty)$ let $Y_{0,h}, Y_{1,h} , \dots , Y_{\floor{T/h},h} \in \R^d$ satisfy for all $n \in \{0,1,\dots,\floor{\nicefrac{T}{h}}-1\}$ that $Y_{0,h} = y(0)$ and
%
\begin{equation}\label{eq:3_4_theta}
Y_{n+1,h} = Y_{n,h} + \tfrac{h}{4} \br[\Big]{ f\pr[\big]{ Y_{n,h} } + 3 f\pr[\big]{ Y_{n+1,h} } } \dpp
\end{equation}
%
%
\begin{enumerate}[label=\alph*.]
%
\item
Determine whether or not \cref{eq:3_4_theta} is consistent (cf.\ \cref{def:order}).
%
%
If \cref{eq:3_4_theta} is consistent, determine its order.
%
%
\item
Determine whether or not \cref{eq:3_4_theta} is convergent (cf.\ \cref{def:converge}).
%
%
\item
Determine whether or not \cref{eq:3_4_theta} is A-stable (cf.\ \cref{def:stable}).
%
\end{enumerate}
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{25}
\noindent
\end{aproof}
%
%
%
\section{Geometric numerical integration}
%
%
%
\section{Error control}
%
%
%
\section{Nonlinear algebraic systems}
%
%
%
\section{Finite difference schemes}
%
%
%
\begin{athm}{problem}{prob:finite_difference}
%
Let $N \in \N$, $\alpha , \beta \in \R$,
let
$f \in C(\R,\R)$
and
$u \in C^4([0,1],\R)$
satisfy for all
$ x \in [0,1] $
that
$u(0) = \alpha$,
$u(1) = \beta$,
and
%
\begin{equation}\label{eq:1d_laplace}
\pr[\big]{ \tfrac{\dd^2}{\dd x^2} u } \lr (x) = f(x) \dc
\end{equation}
%
and
let
$h_0,h_1,\dots,h_N , x_0,x_1,\dots,x_{N+1} \in [0,1]$
satisfy for all
$n \in \{0,1,\dots,N\}$
that
%
\begin{equation}\label{eq:non-uniform_grid}
0 = x_0 < x_1 < x_2 < \ldots < x_N < x_{N+1} = 1
\qquad \text{and} \qquad
h_n = x_{n+1} - x_n \dpp
\end{equation}
%
%
\begin{enumerate}[label=\alph*.]
%
\item \label{prob:finite_difference_a}
Construct a three-point finite difference scheme for approximating the solution to \cref{eq:1d_laplace} on the non-uniform grid $\{ x_n \}_{n\in\{0,1,\dots,N+1\}} \subseteq [0,1]$ given by \cref{eq:non-uniform_grid}.
%
%
\item
Determine the order of the method constructed in \cref{prob:finite_difference_a}~above.
%
%
Determine what additional assumptions are necessary (if any) for guaranteeing this order.
%
%
Compare these results with the case from Section 8.2 of the textbook (i.e., the case when $h_0 = h_1 = \ldots = h_N$).
%
%
\item \label{prob:finite_difference_c}
Write the finite difference scheme constructed in \cref{prob:finite_difference_a}~above in the form of a linear system (i.e., as a matrix-vector equation).
%
%
\item
Determine whether the linear system obtained in \cref{prob:finite_difference_c}~is always nonsingular.
%
%
If the linear system is not always nonsingular, provide sufficient conditions to guarantee that the linear system is nonsingular.
%
%
\item
Implement your finite difference scheme (i.e., the difference equations from \cref{prob:finite_difference_a}\ above or the linear system from \cref{prob:finite_difference_c}~above) in Python.
%
%
Numerically compare the approximate solution with the true solution for some ``test case.''
%
%
\end{enumerate}
\end{athm}
%
%
%
\begin{aproof}
\linebreaks{30}
\noindent
\end{aproof}
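%
%
%
As a starting point for the implementation requested in \cref{prob:finite_difference}, a sketch for the uniform-grid special case $h_0 = h_1 = \ldots = h_N = \nicefrac{1}{(N+1)}$ of \cref{eq:1d_laplace}; the non-uniform generalization asked for above amounts to changing the three stencil weights:
%
\begin{verbatim}
import numpy as np

def poisson_1d_uniform(f, alpha, beta, N):
    """Solve u'' = f on (0,1) with u(0) = alpha, u(1) = beta using the
    standard three-point stencil on the uniform grid x_n = n/(N+1)."""
    h = 1.0 / (N + 1)
    x = np.linspace(0.0, 1.0, N + 2)
    L = (np.diag(-2.0 * np.ones(N))
         + np.diag(np.ones(N - 1), 1)
         + np.diag(np.ones(N - 1), -1)) / h ** 2
    b = f(x[1:-1]).astype(float)
    b[0] -= alpha / h ** 2         # fold the boundary values into the
    b[-1] -= beta / h ** 2         # right-hand side
    u = np.linalg.solve(L, b)
    return x, np.concatenate(([alpha], u, [beta]))

# Test case: f(x) = -pi^2 sin(pi x), exact solution u(x) = sin(pi x).
x, u = poisson_1d_uniform(lambda x: -np.pi ** 2 * np.sin(np.pi * x), 0.0, 0.0, 99)
print(np.max(np.abs(u - np.sin(np.pi * x))))       # O(h^2) error
\end{verbatim}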
%
%
%
\end{document}