\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{cancel}

\newtheorem{lem}{Lemma}

\setlength\parindent{0pt}
\title{Numerical Analysis of DiffEq HW 1}
\author{Shakil Rafi}
\begin{document}
\maketitle
\textbf{1.1} We first need to establish a lemma:
\begin{lem}
$hf\big(t_{n+\frac{1}{2}},\frac{1}{2}(y(t_n)+y(t_{n+1}))\big) = hf\big(t_{n+\frac{1}{2}},y(t_{n+\frac{1}{2}})\big) +\eta$, where $\eta = \mathcal{O}(h^3)$.
\end{lem}
\begin{align*}
\|\eta\| &= h\big\|f\big(t_{n+\frac{1}{2}},\tfrac{1}{2}(y(t_n)+y(t_{n+1}))\big) - f\big(t_{n+\frac{1}{2}},y(t_{n+\frac{1}{2}})\big)\big\| \\
&\leq h\lambda\big\|\tfrac{1}{2}(y(t_n)+y(t_{n+1})) - y(t_{n+\frac{1}{2}})\big\| \\
&= \frac{h\lambda}{2}\big\|y(t_n)+y(t_{n+1}) - 2y(t_{n+\frac{1}{2}})\big\|
\end{align*}
where $\lambda$ is the Lipschitz constant of $f$. A Taylor expansion around $t_n$ then gives us:
\begin{align*}
\|\eta\| &\leq \frac{h\lambda}{2}\big\|y+(y+hy')-2\big(y+\tfrac{h}{2}y'\big)+\mathcal{O}(h^2)\big\|\\
&= \frac{h\lambda}{2}\,\mathcal{O}(h^2) \\
&=\mathcal{O}(h^3)
\end{align*}
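For completeness, here are the expansions used above (a sketch, assuming $y$ is three times continuously differentiable; all derivatives are evaluated at $t_n$, and the $h^2$ terms are retained to show that the cancellation is exact to one more order):
\begin{align*}
y(t_{n+1}) &= y + hy' + \frac{h^2}{2}y'' + \mathcal{O}(h^3), \\
y(t_{n+\frac{1}{2}}) &= y + \frac{h}{2}y' + \frac{h^2}{8}y'' + \mathcal{O}(h^3), \\
y(t_n)+y(t_{n+1}) - 2y(t_{n+\frac{1}{2}}) &= \frac{h^2}{4}y'' + \mathcal{O}(h^3),
\end{align*}
so in fact $\|\eta\| \leq \frac{h^3\lambda}{8}\|y''(t_n)\| + \mathcal{O}(h^4)$.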
\textbf{Proving the convergence of the implicit midpoint rule}:
The implicit midpoint rule is:
\begin{align}
y_{n+1}=y_n+hf(t_n+\frac{h}{2},\frac{1}{2}(y_n+y_{n+1})).
\end{align}
Substituting the exact solution then gives us:
\begin{align}
y(t_{n+1}) = y(t_n)+hf\big(t_n+\tfrac{h}{2}, \tfrac{1}{2}(y(t_n)+y(t_{n+1}))\big)+\mathcal{O}(h^3)
\end{align}
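To justify the $\mathcal{O}(h^3)$ term in $(2)$, one can write the exact solution using the midpoint quadrature rule and then apply the lemma:
\begin{align*}
y(t_{n+1}) &= y(t_n) + \int_{t_n}^{t_{n+1}} f(\tau,y(\tau))\,\mathrm{d}\tau \\
&= y(t_n) + hf\big(t_{n+\frac{1}{2}},y(t_{n+\frac{1}{2}})\big) + \mathcal{O}(h^3) \\
&= y(t_n) + hf\big(t_n+\tfrac{h}{2},\tfrac{1}{2}(y(t_n)+y(t_{n+1}))\big) + \mathcal{O}(h^3).
\end{align*}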
Following closely the proofs of $(1.9)$ and $(1.4)$ in Iserles, we subtract $(2)$ from $(1)$ and apply the lemma to get:
\begin{align*}
e_{n+1} &= e_n +h\Big(f\big(t_{n+\frac{1}{2}},\tfrac{1}{2}(y_n+y_{n+1})\big) -f\big(t_{n+\frac{1}{2}},\tfrac{1}{2}(y(t_n)+y(t_{n+1}))\big)\Big) +\mathcal{O}(h^3) \\
\|e_{n+1}\| &\leq \|e_{n}\| +\frac{\lambda h}{2}\big(\|e_n\|+\|e_{n+1}\|\big)+\mathcal{O}(h^3)
\end{align*}
We then reproduce the steps in Iserles:
\begin{align*}
\|e_{n+1}\|-\frac{\lambda h}{2}\|e_{n+1}\| &\leq \|e_n\| + \frac{\lambda h}{2}\|e_n\| + ch^3 \quad \text{for some } c>0 \\
\|e_{n+1}\| &\leq \bigg(\frac{1+\frac{\lambda h}{2}}{1-\frac{\lambda h}{2}}\bigg)\|e_n\| + \frac{c}{1-\frac{\lambda h}{2}}h^3
\end{align*}
Similarly to Iserles, we claim that:
\begin{align*}
\|e_n\| \leq \frac{c}{\lambda}\bigg[ \bigg(\frac{1+\frac{\lambda h}{2}}{1-\frac{\lambda h}{2}}\bigg)^n -1 \bigg]h^2
\end{align*}
We will use induction to prove this step. Clearly it holds for $n=0$. Assume that the above inequality holds up to and including $n\in \mathbb{N}$; then for $n+1$ we have:
\begin{align*}
\|e_{n+1}\| &\leq \bigg(\frac{1+\frac{\lambda h}{2}}{1-\frac{\lambda h}{2}}\bigg)\frac{c}{\lambda}\bigg[ \bigg(\frac{1+\frac{\lambda h}{2}}{1-\frac{\lambda h}{2}}\bigg)^n -1 \bigg]h^2+ \frac{c}{1-\frac{\lambda h}{2}}h^3 \\
&= \frac{c}{\lambda} \bigg(\frac{1+\frac{\lambda h}{2}}{1-\frac{\lambda h}{2}}\bigg)^{n+1}h^2 - \frac{c}{\lambda}\bigg(\frac{1+\frac{\lambda h}{2}}{1-\frac{\lambda h}{2}}\bigg)h^2 +\frac{c}{1-\frac{\lambda h}{2}}h^3 \\
&= \frac{c}{\lambda}\bigg[ \bigg(\frac{1+\frac{\lambda h}{2}}{1-\frac{\lambda h}{2}}\bigg)^{n+1} -1 \bigg]h^2
\end{align*}
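The final equality holds because the two leftover terms cancel exactly:
\begin{align*}
\frac{c}{\lambda}\bigg(\frac{1+\frac{\lambda h}{2}}{1-\frac{\lambda h}{2}}-1\bigg)h^2 = \frac{c}{\lambda}\cdot\frac{\lambda h}{1-\frac{\lambda h}{2}}\,h^2 = \frac{c}{1-\frac{\lambda h}{2}}h^3.
\end{align*}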
\textbf{To show that the $\theta$ method is convergent}, we define the $\theta$ method as:
\begin{align}
y_{n+1} &= y_{n}+h[\theta f(t_n,y_n)+(1-\theta)f(t_{n+1},y_{n+1})]
\end{align}
Substituting the exact solution, on the other hand, gives us:
\begin{align}
y(t_{n+1}) &= y(t_n) +h[\theta f(t_n,y(t_n))+(1-\theta)f(t_{n+1},y(t_{n+1}))]+\mathcal{O}(h^3)
\end{align}
(Strictly speaking, the truncation error here is $\mathcal{O}(h^3)$ only for $\theta=\frac{1}{2}$; for $\theta\neq\frac{1}{2}$ it is $\mathcal{O}(h^2)$, and the argument below then yields a bound of order $h$ rather than $h^2$.)
Once again, subtracting $(4)$ from $(3)$ gives us:
\begin{align*}
e_{n+1} &= e_n +h[\theta f(t_n,y_n)-\theta f(t_n,y(t_n))+(1-\theta)f(t_{n+1},y_{n+1})-(1-\theta)f(t_{n+1},y(t_{n+1}))] +\mathcal{O}(h^3)\\
\|e_{n+1}\| &\leq \|e_n\|+h[\theta \lambda \|e_n\|+(1-\theta)\lambda \|e_{n+1}\|]+\mathcal{O}(h^3) \\
\|e_{n+1}\| &\leq \bigg(\frac{1+h\theta \lambda}{1-h(1-\theta)\lambda} \bigg)\|e_n\|+\frac{c}{1-h(1-\theta)\lambda}h^3 \quad \text{for some }c>0
\end{align*}
Similarly to the trapezoidal rule, we will argue that:
\begin{align*}
\|e_n\| \leq \frac{c}{\lambda} \bigg[ \bigg(\frac{1+h\theta \lambda}{1-h(1-\theta)\lambda} \bigg) ^n -1 \bigg] h^2
\end{align*}
We argue via induction. It is clearly true for $n=0$, as at that point the exact and approximate solutions coincide. Assume now that it is true up to and including $n$. We need to prove it for $n+1$:
\begin{align*}
\|e_{n+1}\| &\leq \bigg(\frac{1+h\theta \lambda}{1-h(1-\theta)\lambda} \bigg)\|e_n\|+\frac{c}{1-h(1-\theta)\lambda}h^3\\
&\leq \bigg(\frac{1+h\theta \lambda}{1-h(1-\theta)\lambda} \bigg)\frac{c}{\lambda} \bigg[ \bigg(\frac{1+h\theta \lambda}{1-h(1-\theta)\lambda} \bigg) ^n -1 \bigg] h^2+\frac{c}{1-h(1-\theta)\lambda}h^3
\end{align*}
Now observe that $\theta$ varies between $0$ and $1$. Thus $\Big(\frac{1+h\theta \lambda}{1-h(1-\theta)\lambda} \Big)$ varies between $\frac{1}{1-h\lambda}$ (at $\theta=0$) and $1+h\lambda$ (at $\theta=1$), both of which are greater than one. As such we can continue and say:
\begin{align*}
\|e_{n+1}\| \leq \frac{c}{\lambda} \bigg[ \bigg(\frac{1+h\theta \lambda}{1-h(1-\theta)\lambda} \bigg) ^{n+1} -1 \bigg] h^2
\end{align*}
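Just as for the implicit midpoint rule, the leftover terms cancel exactly, which is why the $-1$ survives in the bracket:
\begin{align*}
\frac{c}{\lambda}\bigg(\frac{1+h\theta\lambda}{1-h(1-\theta)\lambda}-1\bigg)h^2 = \frac{c}{\lambda}\cdot\frac{h\lambda}{1-h(1-\theta)\lambda}\,h^2 = \frac{c}{1-h(1-\theta)\lambda}h^3.
\end{align*}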
\textbf{1.2a} Let $y'=Ay$, and let $e_n=y_n-y(nh)$.
We want to prove by induction that:
\begin{align*}
\|e_n\|_2 \leq \|y_0\|_2 \max_{\lambda \in \sigma(A)}|(1+h\lambda)^n-e^{nh\lambda}|
\end{align*}
But before that we make an observation:
\begin{align*}
e_n = y_n-y(nh)
\end{align*}
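Since the problem is linear, the error can also be written explicitly; this is what grounds the eigenvalue bound, assuming (as is standard for this exercise) that $A$ is normal, so that $\|g(A)\|_2 = \max_{\lambda\in\sigma(A)}|g(\lambda)|$:
\begin{align*}
y_n = (I+hA)^n y_0, \qquad y(nh) = e^{nhA}y_0, \qquad e_n = \big[(I+hA)^n - e^{nhA}\big]y_0.
\end{align*}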
And since we are using Euler's method, we can say:
\begin{align*}
y(t_{n+1}) &= y(t_n) + hy'(t_n) + \mathcal{O}(h^2) \\
y_{n+1} - y(t_{n+1}) &= y_n - y(nh) +h[f(t_n,y_n)-f(t_n,y(t_n))]+\mathcal{O}(h^2)
\end{align*}
Substituting $f(t,y)=Ay$ gives us:
\begin{align*}
e_{n+1} &= e_n+h[Ay_n - Ay(nh)]+\mathcal{O}(h^2) \\
\|e_{n+1}\|_2 &\leq \|e_n\|_2 +h\lambda \|e_n\|_2+\mathcal{O}(h^2)\\
\|e_{n+1}\|_2 &\leq \|e_n\|_2(1+h\lambda) +\mathcal{O}(h^2)
\end{align*}
For the induction part we observe that the statement clearly holds true for $n=0$ since then we get:
\begin{align*}
\|e_0\|_2 &\leq \|y_0\|_2 \max_{\lambda \in \sigma(A)}|(1+h\lambda)^0 - e^0| \\
0 &\leq 0
\end{align*}
Using this as our base case, assume the inequality holds up to and including $n$; then for $\|e_{n+1}\|_2$ we have:
\begin{align*}
\|e_{n+1}\|_2 &\leq \|e_n\|_2(1+h\lambda)+\mathcal{O}(h^2) \\
&\leq \|y_0\|_2 \max_{\lambda \in \sigma(A)}|(1+h\lambda)^n - e^{nh\lambda}|(1+h\lambda)+\mathcal{O}(h^2)\\
&\leq \|y_0\|_2\max_{\lambda \in \sigma(A)}|(1+h\lambda)^{n+1}-[e^{nh\lambda}+h\lambda e^{nh\lambda}]| \\
&\leq \|y_0\|_2\max_{\lambda \in \sigma(A)}|(1+h\lambda)^{n+1}-e^{(n+1)h\lambda}|
\end{align*}
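The step to the last line uses $1+x\leq e^x$ (proved in \textbf{1.2b} below) with $x=h\lambda$:
\begin{align*}
e^{nh\lambda}+h\lambda e^{nh\lambda} = (1+h\lambda)e^{nh\lambda} \leq e^{h\lambda}\,e^{nh\lambda} = e^{(n+1)h\lambda}.
\end{align*}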
\textbf{1.2b} From the hint, we first seek to prove $1+x \leq e^x$. Let $f(x) = e^x-x-1$; then $f'(x) = e^x-1$ and $f''(x) = e^x$. Since $f'(0)=0$ and $f''(x)>0$ for all $x$, $f$ is convex with a global minimum of $f(0)=0$, and so $f(x) \geq 0$ for all $x$ and hence $e^x \geq 1+x$.
Following the hint again, we seek to prove that $1+x+\frac{x^2}{2} \geq e^x$ for $x\in[-1,0]$. Observe that the series expansion of $e^x$ is $e^x = 1+x+\frac{x^2}{2!}+\frac{x^3}{3!}+\mathcal{O}(x^4)$. Thus:
\begin{align*}
e^x - 1 - x - \frac{x^2}{2} &= \cancel{1}+\cancel{x}+\cancel{\frac{x^2}{2}} +\frac{x^3}{3!}+\mathcal{O}(x^4) - \cancel{1}-\cancel{x}-\cancel{\frac{x^2}{2}} \\
&= \frac{x^3}{3!} + \mathcal{O}(x^4) \leq 0 \quad \text{as } x\in[-1,0]
\end{align*}
We use similar logic for the last part of the hint. Observe that $(a-b)^n = \sum^n_{i=0}{n \choose i}a^{n-i}(-b)^i = a^n -na^{n-1}b + \dots$. So:
\begin{align*}
(a-b)^n - a^n + na^{n-1}b &= \sum^n_{i=2}{n \choose i}a^{n-i}(-b)^i \geq 0 \quad \text{as $a$ is close to $1$ and $b$ is small}
\end{align*}
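This smallness assumption can be avoided: a short induction on $n$ (a sketch, assuming only $a \geq b \geq 0$, which holds here once $h$ is small enough) gives the inequality directly. The base case $n=1$ is an equality, and
\begin{align*}
(a-b)^n &= (a-b)(a-b)^{n-1} \\
&\geq (a-b)\big(a^{n-1}-(n-1)a^{n-2}b\big) \\
&= a^n - na^{n-1}b + (n-1)a^{n-2}b^2 \\
&\geq a^n - na^{n-1}b.
\end{align*}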
For the actual proof, let $a=e^x$ and $b=\frac{1}{2}x^2$. We then get:
\begin{align*}
e^{nx} - \frac{1}{2}nx^2e^{(n-1)x} &\leq \bigg(e^x-\frac{x^2}{2}\bigg)^n \leq (1+x)^n \leq e^{nx}
\end{align*}
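Combining the two ends of this chain with $x=h\lambda$ (so $x\in[-1,0]$ and $e^{(n-1)x}\leq 1$) and writing $t=nh$ gives
\begin{align*}
\big|(1+h\lambda)^n - e^{nh\lambda}\big| \leq \frac{1}{2}n(h\lambda)^2e^{(n-1)h\lambda} \leq \frac{1}{2}nh^2\lambda^2 = \frac{\lambda^2 t}{2}\,h,
\end{align*}
so together with part (a) the Euler error satisfies $\|e_n\|_2 = \mathcal{O}(h)$ as $h\to 0$.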
\textbf{1.4}
\end{document}