This commit is contained in:
Shakil Rafi 2024-03-11 19:41:42 -05:00
parent b40d030887
commit 6900a70b95
41 changed files with 3265 additions and 3013 deletions

BIN
.DS_Store vendored

Binary file not shown.

BIN
Dissertation/.DS_Store vendored Normal file

Binary file not shown.

View File

@ -1,81 +1,92 @@
\relax
\providecommand\zref@newlabel[2]{}
\providecommand\hyper@newdestlabel[2]{}
\@writefile{toc}{\contentsline {chapter}{\numberline {2}Brownian Motion Monte Carlo}{16}{chapter.2}\protected@file@percent }
\citation{durrett2019probability}
\citation{karatzas1991brownian}
\@writefile{toc}{\contentsline {chapter}{\numberline {2}Brownian Motion Monte Carlo}{18}{chapter.2}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {2.1}Brownian Motion Preliminaries}{16}{section.2.1}\protected@file@percent }
\newlabel{def:1.17}{{2.1.5}{17}{Of $\mathfrak {k}$}{theorem.2.1.5}{}}
\newlabel{def:1.17@cref}{{[definition][5][2,1]2.1.5}{[1][17][]17}}
\newlabel{primarysetting}{{2.1.6}{17}{Primary Setting}{theorem.2.1.6}{}}
\newlabel{primarysetting@cref}{{[definition][6][2,1]2.1.6}{[1][17][]17}}
\newlabel{(2.1.2)}{{2.1.2}{17}{Primary Setting}{equation.2.1.2}{}}
\newlabel{(2.1.2)@cref}{{[equation][2][2,1]2.1.2}{[1][17][]17}}
\newlabel{(1.12)}{{2.1.3}{17}{Primary Setting}{equation.2.1.3}{}}
\newlabel{(1.12)@cref}{{[equation][3][2,1]2.1.3}{[1][17][]17}}
\newlabel{(2.1.4)}{{2.1.4}{17}{Primary Setting}{equation.2.1.4}{}}
\newlabel{(2.1.4)@cref}{{[equation][4][2,1]2.1.4}{[1][17][]17}}
\newlabel{lemma1.1}{{2.1.7}{17}{}{theorem.2.1.7}{}}
\newlabel{lemma1.1@cref}{{[lemma][7][2,1]2.1.7}{[1][17][]17}}
\@writefile{toc}{\contentsline {section}{\numberline {2.1}Brownian Motion Preliminaries}{18}{section.2.1}\protected@file@percent }
\newlabel{iobm}{{2.1.2}{19}{Independence of Brownian Motion}{theorem.2.1.2}{}}
\newlabel{iobm@cref}{{[lemma][2][2,1]2.1.2}{[1][19][]19}}
\newlabel{def:1.17}{{2.1.5}{21}{Of $\mathfrak {k}$}{theorem.2.1.5}{}}
\newlabel{def:1.17@cref}{{[definition][5][2,1]2.1.5}{[1][21][]21}}
\newlabel{primarysetting}{{2.1.6}{21}{Primary Setting For This Chapter}{theorem.2.1.6}{}}
\newlabel{primarysetting@cref}{{[definition][6][2,1]2.1.6}{[1][21][]21}}
\newlabel{(2.1.2)}{{2.3}{21}{Primary Setting For This Chapter}{equation.2.1.3}{}}
\newlabel{(2.1.2)@cref}{{[equation][3][2,1]2.3}{[1][21][]21}}
\citation{hutzenthaler_overcoming_2020}
\newlabel{lem:1.20}{{2.1.8}{18}{}{theorem.2.1.8}{}}
\newlabel{lem:1.20@cref}{{[lemma][8][2,1]2.1.8}{[1][18][]18}}
\newlabel{(2.1.6)}{{2.1.6}{18}{Brownian Motion Preliminaries}{equation.2.1.6}{}}
\newlabel{(2.1.6)@cref}{{[equation][6][2,1]2.1.6}{[1][18][]18}}
\newlabel{eq:1.4}{{2.1}{18}{Brownian Motion Preliminaries}{equation.2.1.6}{}}
\newlabel{eq:1.4@cref}{{[section][1][2]2.1}{[1][18][]18}}
\newlabel{(1.17)}{{2.1.7}{18}{Brownian Motion Preliminaries}{equation.2.1.7}{}}
\newlabel{(1.17)@cref}{{[equation][7][2,1]2.1.7}{[1][18][]18}}
\newlabel{(1.18)}{{2.1.8}{19}{Brownian Motion Preliminaries}{equation.2.1.8}{}}
\newlabel{(1.18)@cref}{{[equation][8][2,1]2.1.8}{[1][18][]19}}
\newlabel{(1.19)}{{2.1.9}{19}{Brownian Motion Preliminaries}{equation.2.1.9}{}}
\newlabel{(1.19)@cref}{{[equation][9][2,1]2.1.9}{[1][19][]19}}
\newlabel{(1.20)}{{2.1.10}{19}{Brownian Motion Preliminaries}{equation.2.1.10}{}}
\newlabel{(1.20)@cref}{{[equation][10][2,1]2.1.10}{[1][19][]19}}
\newlabel{(1.21)}{{2.1.11}{19}{Brownian Motion Preliminaries}{equation.2.1.11}{}}
\newlabel{(1.21)@cref}{{[equation][11][2,1]2.1.11}{[1][19][]19}}
\newlabel{cor:1.20.1}{{2.1.8.1}{19}{}{corollary.2.1.8.1}{}}
\newlabel{cor:1.20.1@cref}{{[corollary][1][]2.1.8.1}{[1][19][]19}}
\newlabel{(1.12)}{{2.4}{22}{Primary Setting For This Chapter}{equation.2.1.4}{}}
\newlabel{(1.12)@cref}{{[equation][4][2,1]2.4}{[1][21][]22}}
\newlabel{(2.1.4)}{{2.5}{22}{Primary Setting For This Chapter}{equation.2.1.5}{}}
\newlabel{(2.1.4)@cref}{{[equation][5][2,1]2.5}{[1][22][]22}}
\newlabel{lemma1.1}{{2.1.7}{22}{}{theorem.2.1.7}{}}
\newlabel{lemma1.1@cref}{{[lemma][7][2,1]2.1.7}{[1][22][]22}}
\newlabel{lem:1.20}{{2.1.8}{23}{}{theorem.2.1.8}{}}
\newlabel{lem:1.20@cref}{{[lemma][8][2,1]2.1.8}{[1][23][]23}}
\newlabel{(2.1.6)}{{2.7}{23}{Brownian Motion Preliminaries}{equation.2.1.7}{}}
\newlabel{(2.1.6)@cref}{{[equation][7][2,1]2.7}{[1][23][]23}}
\newlabel{eq:1.4}{{2.1}{23}{Brownian Motion Preliminaries}{equation.2.1.7}{}}
\newlabel{eq:1.4@cref}{{[section][1][2]2.1}{[1][23][]23}}
\newlabel{(1.17)}{{2.8}{23}{Brownian Motion Preliminaries}{equation.2.1.8}{}}
\newlabel{(1.17)@cref}{{[equation][8][2,1]2.8}{[1][23][]23}}
\newlabel{(1.18)}{{2.9}{23}{Brownian Motion Preliminaries}{equation.2.1.9}{}}
\newlabel{(1.18)@cref}{{[equation][9][2,1]2.9}{[1][23][]23}}
\newlabel{(1.19)}{{2.10}{23}{Brownian Motion Preliminaries}{equation.2.1.10}{}}
\newlabel{(1.19)@cref}{{[equation][10][2,1]2.10}{[1][23][]23}}
\newlabel{(1.20)}{{2.11}{24}{Brownian Motion Preliminaries}{equation.2.1.11}{}}
\newlabel{(1.20)@cref}{{[equation][11][2,1]2.11}{[1][24][]24}}
\newlabel{(1.21)}{{2.12}{24}{Brownian Motion Preliminaries}{equation.2.1.12}{}}
\newlabel{(1.21)@cref}{{[equation][12][2,1]2.12}{[1][24][]24}}
\newlabel{cor:1.20.1}{{2.1.8.1}{24}{}{corollary.2.1.8.1}{}}
\newlabel{cor:1.20.1@cref}{{[corollary][1][]2.1.8.1}{[1][24][]24}}
\citation{rio_moment_2009}
\citation{rio_moment_2009}
\@writefile{toc}{\contentsline {section}{\numberline {2.2}Monte Carlo Approximations}{20}{section.2.2}\protected@file@percent }
\newlabel{lem:1.21}{{2.2.1}{20}{}{theorem.2.2.1}{}}
\newlabel{lem:1.21@cref}{{[lemma][1][2,2]2.2.1}{[1][20][]20}}
\@writefile{toc}{\contentsline {section}{\numberline {2.2}Monte Carlo Approximations}{25}{section.2.2}\protected@file@percent }
\newlabel{lem:1.21}{{2.2.1}{25}{}{theorem.2.2.1}{}}
\newlabel{lem:1.21@cref}{{[lemma][1][2,2]2.2.1}{[1][24][]25}}
\newlabel{corollary:1.11.1.}{{2.2.1.1}{25}{}{corollary.2.2.1.1}{}}
\newlabel{corollary:1.11.1.@cref}{{[corollary][1][]2.2.1.1}{[1][25][]25}}
\citation{grohsetal}
\newlabel{corollary:1.11.1.}{{2.2.1.1}{21}{}{corollary.2.2.1.1}{}}
\newlabel{corollary:1.11.1.@cref}{{[corollary][1][]2.2.1.1}{[1][21][]21}}
\newlabel{(1.26)}{{2.2.5}{21}{}{equation.2.2.5}{}}
\newlabel{(1.26)@cref}{{[equation][5][2,2]2.2.5}{[1][21][]21}}
\newlabel{cor:1.22.2}{{2.2.1.2}{21}{}{corollary.2.2.1.2}{}}
\newlabel{cor:1.22.2@cref}{{[corollary][2][]2.2.1.2}{[1][21][]21}}
\@writefile{toc}{\contentsline {section}{\numberline {2.3}Bounds and Convergence}{21}{section.2.3}\protected@file@percent }
\newlabel{lem:1.21}{{2.3.1}{21}{}{theorem.2.3.1}{}}
\newlabel{lem:1.21@cref}{{[lemma][1][2,3]2.3.1}{[1][21][]21}}
\newlabel{lem:1.22}{{2.3.2}{22}{}{theorem.2.3.2}{}}
\newlabel{lem:1.22@cref}{{[lemma][2][2,3]2.3.2}{[1][22][]22}}
\newlabel{lem:1.25}{{2.3.3}{24}{}{theorem.2.3.3}{}}
\newlabel{lem:1.25@cref}{{[lemma][3][2,3]2.3.3}{[1][24][]24}}
\newlabel{(1.46)}{{2.3.16}{24}{Bounds and Convergence}{equation.2.3.16}{}}
\newlabel{(1.46)@cref}{{[equation][16][2,3]2.3.16}{[1][24][]24}}
\newlabel{(1.47)}{{2.3.17}{24}{Bounds and Convergence}{equation.2.3.17}{}}
\newlabel{(1.47)@cref}{{[equation][17][2,3]2.3.17}{[1][24][]24}}
\newlabel{(1.48)}{{2.3.18}{24}{Bounds and Convergence}{equation.2.3.18}{}}
\newlabel{(1.48)@cref}{{[equation][18][2,3]2.3.18}{[1][24][]24}}
\newlabel{cor:1.25.1}{{2.3.3.1}{24}{}{corollary.2.3.3.1}{}}
\newlabel{cor:1.25.1@cref}{{[corollary][1][]2.3.3.1}{[1][24][]24}}
\newlabel{(2.48)}{{2.3.26}{26}{}{equation.2.3.26}{}}
\newlabel{(2.48)@cref}{{[equation][26][2,3]2.3.26}{[1][26][]26}}
\newlabel{2.3.27}{{2.3.27}{26}{}{equation.2.3.27}{}}
\newlabel{2.3.27@cref}{{[equation][27][2,3]2.3.27}{[1][26][]26}}
\newlabel{2.3.29}{{2.3.28}{26}{Bounds and Convergence}{equation.2.3.28}{}}
\newlabel{2.3.29@cref}{{[equation][28][2,3]2.3.28}{[1][26][]26}}
\newlabel{(1.26)}{{2.5}{26}{}{equation.2.2.5}{}}
\newlabel{(1.26)@cref}{{[equation][5][2,2]2.5}{[1][25][]26}}
\newlabel{cor:1.22.2}{{2.2.1.2}{26}{}{corollary.2.2.1.2}{}}
\newlabel{cor:1.22.2@cref}{{[corollary][2][]2.2.1.2}{[1][26][]26}}
\@writefile{toc}{\contentsline {section}{\numberline {2.3}Bounds and Convergence}{26}{section.2.3}\protected@file@percent }
\newlabel{lem:1.21}{{2.3.1}{26}{}{theorem.2.3.1}{}}
\newlabel{lem:1.21@cref}{{[lemma][1][2,3]2.3.1}{[1][26][]26}}
\newlabel{lem:1.22}{{2.3.2}{27}{}{theorem.2.3.2}{}}
\newlabel{lem:1.22@cref}{{[lemma][2][2,3]2.3.2}{[1][27][]27}}
\newlabel{lem:1.25}{{2.3.3}{28}{}{theorem.2.3.3}{}}
\newlabel{lem:1.25@cref}{{[lemma][3][2,3]2.3.3}{[1][28][]28}}
\newlabel{(1.46)}{{2.16}{29}{Bounds and Convergence}{equation.2.3.16}{}}
\newlabel{(1.46)@cref}{{[equation][16][2,3]2.16}{[1][29][]29}}
\newlabel{(1.47)}{{2.17}{29}{Bounds and Convergence}{equation.2.3.17}{}}
\newlabel{(1.47)@cref}{{[equation][17][2,3]2.17}{[1][29][]29}}
\newlabel{(1.48)}{{2.18}{29}{Bounds and Convergence}{equation.2.3.18}{}}
\newlabel{(1.48)@cref}{{[equation][18][2,3]2.18}{[1][29][]29}}
\newlabel{cor:1.25.1}{{2.3.3.1}{29}{}{corollary.2.3.3.1}{}}
\newlabel{cor:1.25.1@cref}{{[corollary][1][]2.3.3.1}{[1][29][]29}}
\newlabel{tentpole_1}{{2.3.4}{30}{}{theorem.2.3.4}{}}
\newlabel{tentpole_1@cref}{{[theorem][4][2,3]2.3.4}{[1][30][]30}}
\newlabel{(2.48)}{{2.26}{31}{}{equation.2.3.26}{}}
\newlabel{(2.48)@cref}{{[equation][26][2,3]2.26}{[1][31][]31}}
\newlabel{2.3.27}{{2.27}{31}{}{equation.2.3.27}{}}
\newlabel{2.3.27@cref}{{[equation][27][2,3]2.27}{[1][31][]31}}
\citation{hjw2020}
\citation{hjw2020}
\citation{hjw2020}
\newlabel{2.3.34}{{2.3.34}{28}{Bounds and Convergence}{equation.2.3.34}{}}
\newlabel{2.3.34@cref}{{[equation][34][2,3]2.3.34}{[1][28][]28}}
\newlabel{2.3.29}{{2.28}{32}{Bounds and Convergence}{equation.2.3.28}{}}
\newlabel{2.3.29@cref}{{[equation][28][2,3]2.28}{[1][31][]32}}
\newlabel{(2.3.33)}{{2.33}{34}{Bounds and Convergence}{equation.2.3.33}{}}
\newlabel{(2.3.33)@cref}{{[equation][33][2,3]2.33}{[1][33][]34}}
\newlabel{2.3.34}{{2.34}{34}{Bounds and Convergence}{equation.2.3.34}{}}
\newlabel{2.3.34@cref}{{[equation][34][2,3]2.34}{[1][34][]34}}
\newlabel{(2.3.35)}{{2.35}{34}{Bounds and Convergence}{equation.2.3.35}{}}
\newlabel{(2.3.35)@cref}{{[equation][35][2,3]2.35}{[1][34][]34}}
\@setckpt{Brownian_motion_monte_carlo}{
\setcounter{page}{30}
\setcounter{equation}{35}
\setcounter{page}{35}
\setcounter{equation}{36}
\setcounter{enumi}{2}
\setcounter{enumii}{0}
\setcounter{enumiii}{0}
@ -96,10 +107,25 @@
\setcounter{@ppsaveapp}{0}
\setcounter{AM@survey}{0}
\setcounter{parentequation}{0}
\setcounter{footdir@label}{0}
\setcounter{tmpA}{0}
\setcounter{tmpB}{0}
\setcounter{tmpC}{0}
\setcounter{tmpD}{0}
\setcounter{tmpE}{0}
\setcounter{tmpF}{0}
\setcounter{Hijriday}{0}
\setcounter{Hijrimonth}{0}
\setcounter{Hijriyear}{0}
\setcounter{subfigure}{0}
\setcounter{lofdepth}{1}
\setcounter{subtable}{0}
\setcounter{lotdepth}{1}
\setcounter{section@level}{1}
\setcounter{Item}{32}
\setcounter{Item}{35}
\setcounter{Hfootnote}{0}
\setcounter{bookmark@seq@number}{14}
\setcounter{Hy@AnnotLevel}{0}
\setcounter{bookmark@seq@number}{15}
\setcounter{NAT@ctr}{0}
\setcounter{ALG@line}{0}
\setcounter{ALG@rem}{0}
@ -108,6 +134,8 @@
\setcounter{ALG@blocknr}{1}
\setcounter{ALG@storecount}{0}
\setcounter{ALG@tmpcounter}{0}
\setcounter{@stackindex}{0}
\setcounter{ROWcellindex@}{0}
\setcounter{lstnumber}{1}
\setcounter{theorem}{4}
\setcounter{corollary}{0}

View File

@ -1,4 +1,4 @@
\chapter{Brownian Motion Monte Carlo}
\chapter{Brownian Motion Monte Carlo}\label{chp:2}
\section{Brownian Motion Preliminaries}
We present here some standard invariants of Brownian motion. The proofs are standard and can be found, for instance, in \cite{durrett2019probability} and \cite{karatzas1991brownian}.
@ -411,7 +411,7 @@ Observe that (\ref{2.3.29}) guarantees that $\mathbb{F}^d_t \subseteq \mathcal{F
\item it holds for all $d\in \N$ that $\{ A \in \mathcal{F}: \mathbb{P}(A) = 0 \} \subseteq \mathbb{F}^d_0$
\item it holds for all $d \in \N$, $t\in [0,T]$, that $\mathbb{F}^d_t = \bigcap_{s \in (t,T]}\mathbb{F}^d_s$.
\end{enumerate}
Combining item (I), item (II), (\ref{2.3.29}) and \cite[Lemma 2.17]{hjw2020} assures us that for all $d \in \N$ it holds that $W^{d,0}:[0, T] \times \Omega \rightarrow \R^d$ is a standard $\left(\Omega, \mathcal{F}, \mathbb{P}, \left(\mathbb{F}^d_t\right)_{t\in [0, T]}\right)$-Brownian Brownian motion. In addition $(58)$ ensures that it is the case that for all $d\in N$, $x\in \R^d$ it holds that $[0,T] \times \Omega \ni (t,\omega) \mapsto x + W^{d,0}_t(\omega) \in \R^d$ is an $\left(\mathbb{F}^d_t\right)_{t\in [0,T]}/\mathcal{B}\left(\R^d\right)$-adapted stochastic process with continuous sample paths.
Combining item (I), item (II), (\ref{2.3.29}) and \cite[Lemma 2.17]{hjw2020} assures us that for all $d \in \N$ it holds that $W^{d,0}:[0, T] \times \Omega \rightarrow \R^d$ is a standard $\left(\Omega, \mathcal{F}, \mathbb{P}, \left(\mathbb{F}^d_t\right)_{t\in [0, T]}\right)$-Brownian motion. In addition $(58)$ ensures that for all $d\in \N$, $x\in \R^d$ it holds that $[0,T] \times \Omega \ni (t,\omega) \mapsto x + W^{d,0}_t(\omega) \in \R^d$ is an $\left(\mathbb{F}^d_t\right)_{t\in [0,T]}/\mathcal{B}\left(\R^d\right)$-adapted stochastic process with continuous sample paths.
\medskip
This and the fact that for all $d\in \N$, $t\in [0,T]$, $x\in \R^d$ it holds that $a_d(t,x) = 0$, and the fact that for all $d\in \N$, $t \in [0,T]$, $x$,$v\in \R^d$ it holds that $b_d(t,x)v = v$ yield that for all $d \in \N$, $x\in \R^d$ it holds that $[0,T] \times \Omega \ni (t,\omega) \mapsto x+W^{d,0}_t(\omega) \in \R^d$ satisfies, for all $t\in [0,T]$, $\mathbb{P}$-a.s., that:

View File

@ -1,38 +1,62 @@
\relax
\providecommand\zref@newlabel[2]{}
\providecommand\hyper@newdestlabel[2]{}
\citation{golub2013matrix}
\citation{durrett2019probability}
\@writefile{toc}{\contentsline {chapter}{\numberline {1}Introduction.}{5}{chapter.1}\protected@file@percent }
\citation{tsaban_harnessing_2022}
\citation{davies_signature_2021}
\citation{zhao_space-based_2023}
\citation{wu2022sustainable}
\citation{strubell2019energy}
\citation{e_multilevel_2019}
\citation{e_multilevel_2021}
\citation{hutzenthaler_strong_2021}
\@writefile{toc}{\contentsline {chapter}{\numberline {1}Introduction.}{1}{chapter.1}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {1.1}Notation, Definitions \& Basic notions.}{5}{section.1.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1.1}Norms and Inner Product}{5}{subsection.1.1.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1.2}Probability Space and Brownian Motion}{6}{subsection.1.1.2}\protected@file@percent }
\newlabel{1.9}{{1.1.13}{8}{Strong Solution of Stochastic Differential Equation}{theorem.1.1.13}{}}
\newlabel{1.9@cref}{{[definition][13][1,1]1.1.13}{[1][7][]8}}
\newlabel{1.5}{{1.1.8}{8}{Strong Solution of Stochastic Differential Equation}{equation.1.1.8}{}}
\newlabel{1.5@cref}{{[equation][8][1,1]1.1.8}{[1][8][]8}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1.3}Lipschitz and Related Notions}{9}{subsection.1.1.3}\protected@file@percent }
\newlabel{def:1.13}{{1.1.15}{9}{Globally Lipschitz Function}{theorem.1.1.15}{}}
\newlabel{def:1.13@cref}{{[definition][15][1,1]1.1.15}{[1][8][]9}}
\@writefile{toc}{\contentsline {section}{\numberline {1.1}Motivation}{1}{section.1.1}\protected@file@percent }
\citation{bhj20}
\citation{karatzas1991brownian}
\citation{da_prato_zabczyk_2002}
\newlabel{def:1.14}{{1.1.16}{10}{Locally Lipschitz Function}{theorem.1.1.16}{}}
\newlabel{def:1.14@cref}{{[definition][16][1,1]1.1.16}{[1][10][]10}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1.4}Kolmogorov Equations}{10}{subsection.1.1.4}\protected@file@percent }
\newlabel{(1.7)}{{1.1.16}{11}{Kolmogorov Equation}{equation.1.1.16}{}}
\newlabel{(1.7)@cref}{{[equation][16][1,1]1.1.16}{[1][10][]11}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1.5}Linear Algebra Notation and Definitions}{12}{subsection.1.1.5}\protected@file@percent }
\newlabel{def:1.1.23}{{1.1.23}{12}{Column, Row, and General Vector Notation}{theorem.1.1.23}{}}
\newlabel{def:1.1.23@cref}{{[definition][23][1,1]1.1.23}{[1][12][]12}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1.6}$O$-type notation and function growth}{13}{subsection.1.1.6}\protected@file@percent }
\newlabel{1.1.20.1}{{1.1.26.1}{14}{Bounded functions and $O$-type notation}{corollary.1.1.26.1}{}}
\newlabel{1.1.20.1@cref}{{[corollary][1][]1.1.26.1}{[1][14][]14}}
\newlabel{1.1.20.2}{{1.1.26.2}{14}{}{corollary.1.1.26.2}{}}
\newlabel{1.1.20.2@cref}{{[corollary][2][]1.1.26.2}{[1][14][]14}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1.7}The Iverson Bracket}{15}{subsection.1.1.7}\protected@file@percent }
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{golub2013matrix}
\citation{durrett2019probability}
\citation{graham_concrete_1994}
\@writefile{toc}{\contentsline {section}{\numberline {1.2}Notation, Definitions \& Basic notions.}{3}{section.1.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.1}Norms and Inner Products}{3}{subsection.1.2.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.2}Probability Space and Brownian Motion}{4}{subsection.1.2.2}\protected@file@percent }
\newlabel{def:brown_motion}{{1.2.11}{5}{Brownian Motion Over a Stochastic Basis}{theorem.1.2.11}{}}
\newlabel{def:brown_motion@cref}{{[definition][11][1,2]1.2.11}{[1][5][]5}}
\newlabel{1.9}{{1.2.14}{6}{Strong Solution of Stochastic Differential Equation}{theorem.1.2.14}{}}
\newlabel{1.9@cref}{{[definition][14][1,2]1.2.14}{[1][6][]6}}
\newlabel{1.5}{{1.9}{6}{Strong Solution of Stochastic Differential Equation}{equation.1.2.9}{}}
\newlabel{1.5@cref}{{[equation][9][1,2]1.9}{[1][6][]6}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.3}Lipschitz and Related Notions}{7}{subsection.1.2.3}\protected@file@percent }
\newlabel{def:1.13}{{1.2.16}{7}{Globally Lipschitz Function}{theorem.1.2.16}{}}
\newlabel{def:1.13@cref}{{[definition][16][1,2]1.2.16}{[1][7][]7}}
\newlabel{def:1.14}{{1.2.17}{8}{Locally Lipschitz Function}{theorem.1.2.17}{}}
\newlabel{def:1.14@cref}{{[definition][17][1,2]1.2.17}{[1][8][]8}}
\citation{da_prato_zabczyk_2002}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.4}Kolmogorov Equations}{9}{subsection.1.2.4}\protected@file@percent }
\newlabel{(1.7)}{{1.17}{9}{Kolmogorov Equation}{equation.1.2.17}{}}
\newlabel{(1.7)@cref}{{[equation][17][1,2]1.17}{[1][9][]9}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.5}Linear Algebra Notation and Definitions}{10}{subsection.1.2.5}\protected@file@percent }
\newlabel{def:1.1.23}{{1.2.25}{11}{Column and Row Notation}{theorem.1.2.25}{}}
\newlabel{def:1.1.23@cref}{{[definition][25][1,2]1.2.25}{[1][11][]11}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.6}$O$-type Notation and Function Growth}{13}{subsection.1.2.6}\protected@file@percent }
\newlabel{1.1.20.1}{{1.2.31.1}{13}{Bounded functions and $O$-type notation}{corollary.1.2.31.1}{}}
\newlabel{1.1.20.1@cref}{{[corollary][1][]1.2.31.1}{[1][13][]13}}
\newlabel{1.1.20.2}{{1.2.31.2}{14}{}{corollary.1.2.31.2}{}}
\newlabel{1.1.20.2@cref}{{[corollary][2][]1.2.31.2}{[1][14][]14}}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2.7}The Concatenation of Vectors \& Functions}{15}{subsection.1.2.7}\protected@file@percent }
\newlabel{sum_of_frown_frown_of_sum}{{1.2.34.1}{15}{}{corollary.1.2.34.1}{}}
\newlabel{sum_of_frown_frown_of_sum@cref}{{[corollary][1][]1.2.34.1}{[1][15][]15}}
\newlabel{concat_fun_fun_concat}{{1.2.35.1}{16}{}{corollary.1.2.35.1}{}}
\newlabel{concat_fun_fun_concat@cref}{{[corollary][1][]1.2.35.1}{[1][16][]16}}
\newlabel{par_cont}{{1.2.36}{17}{}{theorem.1.2.36}{}}
\newlabel{par_cont@cref}{{[lemma][36][1,2]1.2.36}{[1][17][]17}}
\@setckpt{Introduction}{
\setcounter{page}{16}
\setcounter{equation}{34}
\setcounter{page}{18}
\setcounter{equation}{38}
\setcounter{enumi}{3}
\setcounter{enumii}{0}
\setcounter{enumiii}{0}
@ -41,7 +65,7 @@
\setcounter{mpfootnote}{0}
\setcounter{part}{1}
\setcounter{chapter}{1}
\setcounter{section}{1}
\setcounter{section}{2}
\setcounter{subsection}{7}
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
@ -53,10 +77,25 @@
\setcounter{@ppsaveapp}{0}
\setcounter{AM@survey}{0}
\setcounter{parentequation}{0}
\setcounter{footdir@label}{0}
\setcounter{tmpA}{0}
\setcounter{tmpB}{0}
\setcounter{tmpC}{0}
\setcounter{tmpD}{0}
\setcounter{tmpE}{0}
\setcounter{tmpF}{0}
\setcounter{Hijriday}{0}
\setcounter{Hijrimonth}{0}
\setcounter{Hijriyear}{0}
\setcounter{subfigure}{0}
\setcounter{lofdepth}{1}
\setcounter{subtable}{0}
\setcounter{lotdepth}{1}
\setcounter{section@level}{2}
\setcounter{Item}{23}
\setcounter{Item}{26}
\setcounter{Hfootnote}{0}
\setcounter{bookmark@seq@number}{10}
\setcounter{Hy@AnnotLevel}{0}
\setcounter{bookmark@seq@number}{11}
\setcounter{NAT@ctr}{0}
\setcounter{ALG@line}{0}
\setcounter{ALG@rem}{0}
@ -65,8 +104,10 @@
\setcounter{ALG@blocknr}{1}
\setcounter{ALG@storecount}{0}
\setcounter{ALG@tmpcounter}{0}
\setcounter{@stackindex}{0}
\setcounter{ROWcellindex@}{0}
\setcounter{lstnumber}{1}
\setcounter{theorem}{28}
\setcounter{theorem}{36}
\setcounter{corollary}{0}
\setcounter{lstlisting}{0}
}

View File

@ -1,7 +1,8 @@
\chapter{Introduction.}
\section{Motivation}
Artificial neural networks represent a sea change in computing. They have successfully been used in a wide range of applications, from protein-folding in \cite{tsaban_harnessing_2022}, knot theory in \cite{davies_signature_2021}, and extracting data from gravitational waves in \cite{zhao_space-based_2023}.
Artificial neural networks represent a sea change in computing. They have been used successfully in a wide range of applications, from protein folding in \cite{tsaban_harnessing_2022} and knot theory in \cite{davies_signature_2022} to extracting data from gravitational waves in \cite{zhao_space-based_2023}.
\\~\\
As neural networks become more ubiquitous, the number of parameters required to train them increases, which poses two problems: accessibility on low-power devices and the energy needed to train these models; see, for instance, \cite{wu2022sustainable} and \cite{strubell2019energy}. Parameter estimates become ever more crucial in an increasingly climate-challenged world. Knowing strict and precise upper bounds on parameter counts tells us when training becomes wasteful, in some sense, and when different approaches may be needed.
\\~\\
@ -122,7 +123,7 @@ is adapted to the filtration $\mathbb{F}:= (\mathcal{F}_i )_{i \in [0,T]}$
\end{align}
\medskip
A strong solution to the stochastic differential equation (\ref{1.5}) on probability space $(\Omega, \mathcal{F}, \mathbb{P}, (\mathbb{F}_t)_{t \in [0,T]})$, w.r.t Brownian motion $\mathcal{W}$, w.r.t to initial condition $\mathcal{X}_0 = 0$ is a stochastic process $(\mathcal{X}_t)_{t\in[0,\infty)}$ satisfying that:
A strong solution to the stochastic differential equation (\ref{1.5}) on the probability space \\ $(\Omega, \mathcal{F}, \mathbb{P}, (\mathbb{F}_t)_{t \in [0,T]})$, w.r.t.\ the Brownian motion $\mathcal{W}$ and the initial condition $\mathcal{X}_0 = 0$, is a stochastic process $(\mathcal{X}_t)_{t\in[0,\infty)}$ satisfying that:
\begin{enumerate}[label = (\roman*)]
\item $\mathcal{X}_t$ is adapted to the filtration $(\mathbb{F}_t)_{t \in [0,T]}$.
\item $\mathbb{P}(\mathcal{X}_0 = 0) =1$.
@ -289,7 +290,7 @@ for all $\epsilon \in (0,\infty)$.
Thus, under this notation, the component-wise square of $A$ is $\lp \lb A \rb_{*,*}\rp^2$, the component-wise $\sin$ is $\sin\lp \lb A \rb_{*,*}\rp$, and the Hadamard product of $A,B \in \R^{m \times n}$ becomes $A \odot B = \lb A \rb_{*,*} \times \lb B \rb_{*,*}$.
\end{definition}
\begin{remark}
Where we are dealing with a row vector $x \in \R^{d \times 1}$ and it is evident from the context we may choose to write $f\lp \lb x\rb_* \rp$.
Where we are dealing with a column vector $x \in \R^{d \times 1}$ and it is evident from the context we may choose to write $f\lp \lb x\rb_* \rp$.
\end{remark}
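For instance (a purely illustrative example of the notation above; the particular matrices are ours and appear nowhere else in this text), taking
\begin{align*}
A = \begin{bmatrix} 1 & 2 \\ 3 & 4 \end{bmatrix} \qquad \text{and} \qquad B = \begin{bmatrix} 5 & 6 \\ 7 & 8 \end{bmatrix}
\end{align*}
this notation yields
\begin{align*}
\lp \lb A \rb_{*,*} \rp^2 = \begin{bmatrix} 1 & 4 \\ 9 & 16 \end{bmatrix} \qquad \text{and} \qquad A \odot B = \lb A \rb_{*,*} \times \lb B \rb_{*,*} = \begin{bmatrix} 5 & 12 \\ 21 & 32 \end{bmatrix}.
\end{align*}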
\begin{definition}[The Diagonalization Operator]
Let $m_1,m_2,n_1,n_2 \in \N$. Given $A \in \R^{m_1 \times n_1}$ and $B \in \R^{m_2\times n_2}$, we will denote by $\diag\lp A,B\rp$ the matrix:

Binary file not shown.

View File

@ -0,0 +1,8 @@
% Appendix A File
\refstepcounter{chapter}%
\chapter*{\thechapter \quad Appendix A Title}
\label{appendixA}
Your content goes here.

View File

@ -0,0 +1,8 @@
% Appendix B File
\refstepcounter{chapter}%
\chapter*{\thechapter \quad Appendix B Title}
\label{appendixB}
Your content goes here.

View File

@ -0,0 +1 @@
\section{Chapter 1}

View File

@ -0,0 +1,5 @@
\section{Concluding Remarks}
\thispagestyle{empty}
\subsection{Summary}
\subsection{Future directions}

View File

@ -0,0 +1,53 @@
@inproceedings{key1,
title = "Title of the conference paper",
author = "John Author",
booktitle = "Name of the Conference Proceedings",
address = "Location",
month = "Mon.",
year = "Year"
}
@article{key2,
title = "Title of the Journal paper",
author = "Andrew Wiles and Ingrid Daubechies",
journal = "Name of the Journal",
volume = "vol.",
number = "iss.",
pages = "page",
month = "Mon.",
year = "Year"
}
@book{key3,
title = "Book Title",
author = "John Smith and Pierre Fermat and Clark Maxwell",
edition = "3rd",
publisher = "Publisher Name",
year = "Year",
address = "Location"
}
@misc{key4,
title = "Title of the reference",
howpublished = "Publishing details"
}
@phdthesis{key5,
title = "Thesis/Dissertation Title",
author = "Author Name",
school = "School Name",
year = "Year"
}
@book{guidelines,
title = "Thesis and Dissertation Guidelines",
author = "KAUST",
edition = "1",
publisher = "KAUST",
year = "2011",
address = "Thuwal, Makkah Province, Saudi Arabia"
}

View File

@ -0,0 +1,307 @@
%% UA doctoral thesis template to accompany uamaththesis.cls
%% last updated April 2018. Please suggest corrections, changes and
%% improvements to Dan Luecking, SCEN 354.
% Load the class file. This takes the options "chapters", "MS", "MA", and
% "PhD". The last 3 are mutually exclusive and PhD is the default.
% The chapters option indicates that your thesis has chapter subdivisions.
% If it is present, this class loads the LaTeX report class, otherwise
% it loads the article class. In either case, it is loaded with the
% "12pt" option.
%
% The MS and MA options indicate that this is a Master's thesis rather
% than a doctoral dissertation. This has not been thoroughly tested.
% Pretty much all they do is to substitute "thesis" for "dissertation" in
% a few commands and text. They also set the default name of the
% degree, if you don't provide one (see \degreename below)
%
% Any other options are passed directly to the loaded class: article or
% report
%
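% For example (an illustrative sketch only, not part of the original
% template), a doctoral dissertation organized into chapters would load the
% class as
%   \documentclass[chapters]{uamaththesis}
% while a Master of Science thesis without chapter subdivisions might use
%   \documentclass[MS]{uamaththesis}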
\documentclass{uamaththesis}
% Loading packages (amsmath, amssymb, amsthm, setspace, and tocbibind
% are already preloaded). Do _not_ use tocloft unless you know how to
% customize it to satisfy the UA thesis requirements.
%
% For more flexibility in number formatting, load the enumerate package.
% However, make sure you satisfy the Grad school thesis checkers.
\usepackage{enumerate}
\usepackage{amsmath,amssymb,amsthm,enumerate,graphicx}
\usepackage[all]{xy}
\usepackage{calligra,mathrsfs}
\usepackage[hidelinks]{hyperref}
\usepackage{bm,color}
\usepackage{amsfonts}
\usepackage{amscd}
\usepackage[latin2]{inputenc}
\usepackage{t1enc}
\usepackage[mathscr]{eucal}
\usepackage{indentfirst}
\usepackage{graphics}
\usepackage{pict2e}
\usepackage{epic}
\numberwithin{equation}{section}
%\usepackage[margin=2.9cm]{geometry}
\usepackage{epstopdf}
\usepackage{tikz-cd}
\usepackage{tikz}
\usetikzlibrary{shapes,arrows}
\usetikzlibrary{decorations.markings}
\usepackage{mathtools}
\usepackage{cases, caption}
\allowdisplaybreaks
\tikzset{degil/.style={line width=0.75pt,double distance=2pt,
decoration={markings,
mark= at position 0.5 with {
\node[transform shape] (tempnode) {$\backslash$};
%\draw[thick] (tempnode.north east) -- (tempnode.south west);
}
},
postaction={decorate}
}
}
\tikzset{
commutative diagrams/.cd,
arrow style=tikz,
diagrams={>=open triangle 45, line width=0.5pt, double distance=2pt}}
% your definitions
%\newcommand{\disk}{\mathbb{D}} % for example
\theoremstyle{question}
\newtheorem{question}[theorem]{Question}
\theoremstyle{setup}
\newtheorem{setup}[theorem]{Set-up}
\theoremstyle{conjecture}
\newtheorem{conjecture}[theorem]{Conjecture}
%%% to repeat theorem
\theoremstyle{ams-restatedtheorem}
% the argument of restatedtheorem* shouldn't ever actually be used
\newtheorem*{restatedtheorem*}{}
\newenvironment{restatement}[2][]{%
\ifempty{#1}
\begin{restatedtheorem*}[\autoref*{#2}]%
\else%
\begin{restatedtheorem*}[\autoref*{#2} (#1)]%
\fi%
}%
{\end{restatedtheorem*}}
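% A rough usage sketch for the restatement environment above (the label
% thm:main and the bracketed note are placeholders, not labels defined in
% this document):
%   \begin{restatement}[Main Theorem]{thm:main}
%     Repeat the statement of the referenced theorem here verbatim.
%   \end{restatement}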
\makeatother
%%% Shortcuts and symbols
\newcommand{\widesim}[2][1.5]{
\mathrel{\overset{#2}{\scalebox{#1}[1]{$\sim$}}}
}
\newcommand\norm[1]{\left\lVert#1\right\rVert}
\newcommand{\calA}{ \mathcal{ A } }
\newcommand{\calB}{ \mathcal{ B } }
\newcommand{\la}{\langle}
\newcommand{\ra}{\rangle}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\QQ}{\mathbb{Q}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\ZZ}{\mathbb{Z}}
\newcommand{\CC}{\mathbb{C}}
\newcommand{\RP}{\mathbb{RP}}
\newcommand{\PP}{\mathbb{P}}
\newcommand{\Hom}{\text{Hom}}
\newcommand{\bs}{\backslash}
\newcommand{\mf}{\mathfrak}
\newcommand{\mc}{\mathcal}
\newcommand{\grade}{\text{grade}}
\newcommand{\depth}{\text{depth}}
\newcommand{\height}{\text{height}}
\newcommand{\sbt}{\,\begin{picture}(-1,1)(-1,-2)\circle*{2}\end{picture}\ }
\DeclareMathOperator{\Homs}{\mathscr{H}\text{\kern -3pt {\calligra\large om}}\,}
\DeclareSymbolFont{extraup}{U}{zavm}{m}{n}
\DeclareMathSymbol{\varheart}{\mathalpha}{extraup}{86}
\DeclareMathSymbol{\vardiamond}{\mathalpha}{extraup}{87}
\DeclareMathSymbol{\varspade}{\mathalpha}{extraup}{81}
% Theorem-like environments:
% These are predefined, but you may redo any of them with shorter names
% if you prefer. (Note: latex does not allow you to redefine a
% theorem-like environment, so if you want to change the style of one of
% these, use a different name, e.g. prop instead of proposition)
% \theoremstyle{plain}
%\newtheorem{theorem}{Theorem}[section]
%\newtheorem{proposition}[theorem]{Proposition}
%\newtheorem{corollary}[theorem]{Corollary}
%\newtheorem{lemma}[theorem]{Lemma}
% \theoremstyle{definition}
%\newtheorem{definition}{Definition}[section]
%\newtheorem{example}{Example}[section]
% \theoremstyle{remark}
%\newtheorem{remark}{Remark}
% Your thesis title:
% You must use title case: generally every word is capitalized
% except articles (the, a, an) prepositions (of, to, in, with, for, etc.)
% and conjunctions (and, or, but, while, etc.)
% Required.
\title{THESIS TITLE}
% Your name as UAConnect knows you:
% Required.
\author{NAME HERE}
% information about your bachelors degree or the equivalent.
% Required.
\bachelorinstitution{UNIVERSITY NAME}
\bachelordegree{DEGREE}
\bacheloryear{YEAR}
%
% If you have more than one bachelors degree:
%\bachelorinstitutiontwo{Medium State University}
%\bachelordegreetwo{Bachelor of Science in Science}
%\bacheloryeartwo{2011}
% and so on, up to \bachelor...three
% information about your master's or other post-baccalaureate degree.
% Required if you have one.
\masterinstitution{UNIVERSITY NAME}
\masterdegree{DEGREE}
\masteryear{YEAR}
%
% If you have more than one masters degree:
%\masterinstitutiontwo{University of Alabama}
%\masterdegreetwo{Master of Arts in Art}
%\masteryeartwo{2015}
% and so on up to \master...three
% If you have a previous PhD, use the next available \master...
% commands for it.
%
% Name of degree plus month and year of the final approval.
% Required.
%\degreename{Master of Science in Mathematics}
\degreename{Doctor of Philosophy in Mathematics}
\date{DATE}
% Your advisor
% Required.
% Use the first for masters, the second for PhD.
%\thesisdirector{Luigi N. Mario, M.F.A.} % for master's degree.
% or
\dissertationdirector{ADVISOR NAME.}
% Your dissertation/thesis committee. Titles used to be required (Dr. or Prof.
% unless neither applies). But now they seem to want just the highest degree
% earned after the name.
% Two required, extras are optional. I have made provision for up to
% four
\committeememberone{COMMITTEE MEMBER NAME.}
\committeemembertwo{COMMITTEE MEMBER NAME.}
%\committeememberthree{Dr.\ Mario N. Luigi}
%\committeememberfour{Luigi N. Mario, M.F.A.}
\begin{document}
% Start of dissertation/thesis. The \frontmatter command turns off page
% numbering until the \mainmatter command. This is the style mandated
% by the UA dissertation guide. Do not complain to me.
% Required:
\frontmatter
\maketitle
% The grad school now requires the right margins not be justified.
% The \raggedright command is rather inelegant. One can get more
% control of "raggedness" using the ragged2e package.
% Required if some package you used turns it off:
%\raggedright
%\parindent 20pt % reset indentation removed by previous command
% The abstract. Should be less than one page, but this is not forced.
% Required.
\include{Abstract}
% Acknowledgements. Usually less than one page
% Not required, but I've never seen a thesis without one.
\include{Acknowledgment}
% Table of Contents.
% Required:
\tableofcontents
% Other lists if applicable:
% \listoftables
% etc.
%
% Signals start of actual thesis. Starts up page numbering.
% Required:
\mainmatter
% Introductory section or chapter.
% An introduction is not required but very highly recommended. A
% thesis consisting of reproduced published articles *must* include
% a section titled "Introduction" separate from those articles:
% \chapter{Introduction} or
% \section{Introduction}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Put the rest of the theses here. Several more sections %
% (chapters) containing actual mathematics and proofs. %
\include{Chapter_1}
\include{Conclusion}
\include{Appendix_A}
\include{Appendix_B}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% If you load a package that changes the behavior of citations and
% bibliography formatting, the following two commands may be necessary
% (and shouldn't hurt). Adjust the first fraction if the Grad School
% hassles you about spacing between references.
\singlespacing
\setlength{\itemsep}{.75\baselineskip plus .2\baselineskip minus .1\baselineskip}
%
% If no biblography package is loaded, the references will be formatted
% as a section, starting on a new page. If you want it formatted as a
% new chapter, let me know, but expect the grad school to complain about
% the formatting in the table of contents.
%
% If you prefer the References section to be labelled something else:
%\renewcommand{\refname}{Bibliography}% "Works Cited" is another possibility.
%
% The reference section is required to be listed in the TOC, and an added
% package may change that. If so the following may be needed just before
% the bibliography:
% \clearpage
% \addcontentsline{toc}{section}{\refname}%
%
% The closing "thebibliography" environment can be completely replaced
% (if you use BibTeX) by
% \bibliographystyle{plain}% for example, or amsplain or whatever.
% \bibliography{nameoffile}% name of your .bib data file
% If you use the natbib package, use a compatible style, e.g., plainnat
%
\bibliographystyle{plain}
\bibliography{reference.bib}
%
% Appendices go after
%\appendix
% If chapters are used:
%\chapter{Some more stuff}% Appendix A
% otherwise
%\section{Some more stuff}% Appendix A
%
\end{document}

View File

@ -0,0 +1,429 @@
\NeedsTeXFormat{LaTeX2e}
\ProvidesClass{uamaththesis}[2018/04/28 Class for University of
Arkansas math theses]
\newif\ifMS
\MSfalse
\newif\ifMA
\MAfalse
\newif\ifPhD
\PhDtrue
\newif\ifmasters
\mastersfalse
\newif\ifchapters
\chaptersfalse
\DeclareOption{chapters}{\chapterstrue}
\DeclareOption{MS}{\MStrue\PhDfalse\masterstrue}
\DeclareOption{MA}{\MAtrue\PhDfalse\masterstrue}
\DeclareOption{PhD}{\PhDtrue}
\DeclareOption*{
\ifchapters % Set by the class option.
\PassOptionsToClass{\CurrentOption}{report}%
\else
\PassOptionsToClass{\CurrentOption}{article}%
\fi
}
\ProcessOptions
\ifchapters
\LoadClass[12pt]{report}
\def\@makechapterhead#1{%
{\parindent \z@ \raggedright \normalfont
\ifnum \c@secnumdepth >\m@ne
\normalfont\bfseries \@chapapp\space \thechapter
\par\nobreak
%\vskip 20\p@
\fi
\interlinepenalty\@M
\normalfont \bfseries #1\par\nobreak
%\vskip 40\p@
}%
}
\def\@makeschapterhead#1{%
{\parindent \z@ \raggedright
\normalfont
\interlinepenalty\@M
\normalfont \bfseries #1\par\nobreak
%\vskip 40\p@
}%
}
\else
\LoadClass[12pt]{article}
\fi
\renewcommand{\contentsname}{Table of Contents}
\providecommand\refname{}
% They seem to allow "Bibliography" now, not that it matters much.
\renewcommand{\refname}{References}
%
% Footnotes the same size as regular text
\long\def\@footnotetext#1{\insert\footins{%
\reset@font\normalsize
\interlinepenalty\interfootnotelinepenalty
\splittopskip\footnotesep
\splitmaxdepth \dp\strutbox \floatingpenalty \@MM
\hsize\columnwidth \@parboxrestore
\protected@edef\@currentlabel{%
\csname p@footnote\endcsname\@thefnmark
}%
\color@begingroup
\@makefntext{\normalsize\selectfont%
\rule\z@\footnotesep\ignorespaces#1\@finalstrut\strutbox}%
\color@endgroup}}%
% Section headings same size as normal text
\renewcommand\section{\@startsection {section}{1}{\z@}%
{-3.5ex \@plus -1ex \@minus -.2ex}%
{2.3ex \@plus.2ex}%
{\normalfont\normalsize\bfseries}}
\renewcommand\subsection{\@startsection{subsection}{2}{\z@}%
{-3.25ex\@plus -1ex \@minus -.2ex}%
{1.5ex \@plus .2ex}%
{\normalfont\normalsize\scshape}}
\renewcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}%
{-3.25ex\@plus -1ex \@minus -.2ex}%
{1.5ex \@plus .2ex}%
{\normalfont\normalsize\itshape}}
\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}%
{3.25ex \@plus1ex \@minus.2ex}%
{-1em}%
{\normalfont\normalsize\bfseries}}
\renewcommand\subparagraph{\@startsection{subparagraph}{5}{\parindent}%
{3.25ex \@plus1ex \@minus .2ex}%
{-1em}%
{\normalfont\normalsize\scshape}}
\newif\if@mainmatter
\newcommand\frontmatter{%
\clearpage
\@mainmatterfalse
\pagestyle{empty}}
\newcommand\mainmatter{%
\clearpage
\@mainmattertrue
\pagenumbering{arabic}%
\pagestyle{plain}}
\newcommand\backmatter{%
\@mainmatterfalse}
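% Taken together: \frontmatter suppresses page numbers (empty page style)
% for the preliminary pages, \mainmatter restarts arabic page numbering with
% the plain page style, and \backmatter simply switches the main-matter flag
% back off.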
% This is for the math department, after all.
\RequirePackage{amsmath,amssymb, amsthm}
\RequirePackage{setspace}
\RequirePackage[nottoc]{tocbibind}
% One inch margins, one column text, USpaper
\setlength\oddsidemargin{0pt}
\setlength\textwidth{6.5in}
\setlength\topmargin{0pt}
\setlength\headheight{0pt}
\setlength\headsep{0pt}
\setlength\topskip{12pt}
% This gets the page number where it needs to be, because
% we want \textheight + 28.8pt (two baselines to the bottom of the
% page number) to give 9.25 in. I.e., page number is .75in from edge.
% this should be 8.85 (1/8 in larger) but somehow my printer puts the
% page number too low with that value.
\setlength\textheight{8.85in}
\setlength\footskip{28.8pt}
% No page headers
% Upright numbers in enumerates, even in theorems. For more
% flexibility in number formatting, load the enumerate package.
\renewcommand\labelenumi{\normalfont\theenumi.}
\renewcommand\labelenumii{\normalfont(\theenumii)}
\renewcommand\labelenumiii{\normalfont\theenumiii.}
\renewcommand\labelenumiv{\normalfont\theenumiv.}
% Required: single-spaced entries, with double spacing between. Thus,
% turn off double spacing and give \itemsep value
% 2017: apparently new requirement that the references appear in the TOC
\renewenvironment{thebibliography}[1]
{\clearpage
\singlespacing
\section*{\refname}%
\addcontentsline{toc}{section}{\refname}
\@mkboth{\MakeUppercase\refname}{\MakeUppercase\refname}%
\setlength\itemsep{.75\baselineskip plus .3333\baselineskip minus
.1667\baselineskip}
\list{\@biblabel{\@arabic\c@enumiv}}%
{\settowidth\labelwidth{\@biblabel{#1}}%
\leftmargin\labelwidth
\advance\leftmargin\labelsep
\@openbib@code
\usecounter{enumiv}%
\let\p@enumiv\@empty
\renewcommand\theenumiv{\@arabic\c@enumiv}}%
\sloppy
\clubpenalty4000
\@clubpenalty \clubpenalty
\widowpenalty4000%
\sfcode`\.\@m}
{\def\@noitemerr
{\@latex@warning{Empty `thebibliography' environment}}%
\endlist}
\theoremstyle{plain}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{conj}[theorem]{Conjecture}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{lem}[theorem]{Lemma}
\theoremstyle{definition}
\newtheorem{defn}{Definition}[section]
\newtheorem{example}{Example}[section]
\theoremstyle{remark}
\newtheorem{remark}{Remark}
\def\degreename#1{\def\@degreename{#1}}
\def\@degreename{%
\ifmasters
\ifMS
Master of Science in Mathematics%
\else
Master of Arts in Secondary Mathematics%
\fi
\else
Doctor of Philosophy in Mathematics%
\fi}
\def\bachelorinstitutionone#1{\def\@bachelorinstitutionone{#1}}
\def\bachelordegreeone#1{\def\@bachelordegreeone{#1}}
\def\bacheloryearone#1{\def\@bacheloryearone{#1}}
\def\bachelorinstitutiontwo#1{\def\@bachelorinstitutiontwo{#1}}
\def\bachelordegreetwo#1{\def\@bachelordegreetwo{#1}}
\def\bacheloryeartwo#1{\def\@bacheloryeartwo{#1}}
\def\bachelorinstitutionthree#1{\def\@bachelorinstitutionthree{#1}}
\def\bachelordegreethree#1{\def\@bachelordegreethree{#1}}
\def\bacheloryearthree#1{\def\@bacheloryearthree{#1}}
\def\masterinstitutionone#1{\def\@masterinstitutionone{#1}}
\def\masterinstitutiontwo#1{\def\@masterinstitutiontwo{#1}}
\def\masterinstitutionthree#1{\def\@masterinstitutionthree{#1}}
\def\masterdegreeone#1{\def\@masterdegreeone{#1}}
\def\masterdegreetwo#1{\def\@masterdegreetwo{#1}}
\def\masterdegreethree#1{\def\@masterdegreethree{#1}}
\def\masteryearone#1{\def\@masteryearone{#1}}
\def\masteryeartwo#1{\def\@masteryeartwo{#1}}
\def\masteryearthree#1{\def\@masteryearthree{#1}}
%Compatibility
\let\bachelordegree=\bachelordegreeone
\let\bachelorinstitution=\bachelorinstitutionone
\let\bacheloryear=\bacheloryearone
\let\masterdegree=\masterdegreeone
\let\masterinstitution=\masterinstitutionone
\let\masteryear=\masteryearone
\def\thesisdirector#1{\def\@thesisdirector{#1}}
\def\dissertationdirector#1{\def\@dissertationdirector{#1}}
\def\committeememberone#1{\def\@committeememberone{#1}}
\def\committeemembertwo#1{\def\@committeemembertwo{#1}}
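% Note: \committeememberthree and \committeememberfour below also define
% \extracommittee, a two-argument selector used by \maketitle; with a third
% member only the first argument (one extra signature line) is typeset, and
% with a fourth member both arguments (two extra signature lines) are typeset.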
\def\committeememberthree#1{%
\def\@committeememberthree{#1}%
\def\extracommittee##1##2{##1}%
}
\def\committeememberfour#1{%
\def\@committeememberfour{#1}%
\def\extracommittee##1##2{##1##2}%
}
%initialization
\let\@title\@empty
\let\@author\@empty
\let\@bachelorinstitutionone\@empty
\let\@bachelordegreeone\@empty
\let\@bacheloryearone\@empty
\let\@bachelorinstitutiontwo\@empty
\let\@bachelordegreetwo\@empty
\let\@bacheloryeartwo\@empty
\let\@bachelorinstitutionthree\@empty
\let\@bachelordegreethree\@empty
\let\@bacheloryearthree\@empty
\let\@masterinstitutionone\@empty
\let\@masteryearone\@empty
\let\@masterdegreeone\@empty
\let\@masterinstitutiontwo\@empty
\let\@masteryeartwo\@empty
\let\@masterdegreetwo\@empty
\let\@masterinstitutionthree\@empty
\let\@masteryearthree\@empty
\let\@masterdegreethree\@empty
\let\@thesisdirector\@empty
\let\@dissertationdirector\@empty
\let\@committeememberone\@empty
\let\@committeemembertwo\@empty
\let\@committeememberthree\@empty
\let\@committeememberfour\@empty
\let\@date\@empty
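% \maketitle below typesets the UA-style signature/title page: the title, the
% "submitted in partial fulfillment" statement with the degree name, the
% author with prior degrees, the date, the approval statement, and signature
% lines for the director and committee members; afterwards it clears the
% stored metadata and restores double spacing.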
\def\maketitle{%
\pagestyle{empty}
\begingroup
\clearpage
\singlespacing
\begin{centering}
\@title
\nobreak
\vspace{.55in minus .15in}
A \ifmasters thesis \else dissertation \fi submitted in partial
fulfillment\\* of the requirements for the degree of\\*
\@degreename\par
\nobreak
\vspace{.55in minus .15in}
by
\nobreak
\vspace{.55in minus .15in}
\@author\\*
\@bachelorinstitutionone\\*
\@bachelordegreeone, \@bacheloryearone
% more than one bachelor's degree?
\ifx\@bachelordegreetwo\@empty\else\\*
\@bachelorinstitutiontwo\\*
\@bachelordegreetwo, \@bacheloryeartwo
\fi
\ifx\@bachelordegreethree\@empty\else\\*
\@bachelorinstitutionthree\\*
\@bachelordegreethree, \@bacheloryearthree
\fi
% Up to three master's degrees
\ifx\@masterdegreeone\@empty\else\\*
\@masterinstitutionone\\*
\@masterdegreeone, \@masteryearone
\fi
\ifx\@masterdegreetwo\@empty\else\\*
\@masterinstitutiontwo\\*
\@masterdegreetwo, \@masteryeartwo
\fi
\ifx\@masterdegreethree\@empty\else\\*
\@masterinstitutionthree\\*
\@masterdegreethree, \@masteryearthree
\fi
\nobreak
\vspace{.55in minus .15in}
\@date\\*
University of Arkansas
\nobreak
\end{centering}
\nobreak
\vspace{.55in minus .15in}
\noindent This \ifmasters thesis \else dissertation \fi is approved for
recommendation to the Graduate Council.
\nobreak
\vspace{.8in minus .15in}
\vbox{
\noindent \rule{3in}{.4pt}\hfil\break
\hbox to 3.24in{ \ifmasters\@thesisdirector\else\@dissertationdirector\fi\hfil}\hfil\break
\hbox to 3.24in{ \ifmasters Thesis \else Dissertation \fi Director\hfil}}
\nobreak
\vspace{.8in minus .1in}
\vbox{
\noindent \rule{3in}{.4pt}\hfil\rule{3in}{.4pt}\hfil\break
\hbox to 3.24in{ \@committeememberone\hfil}%
\hbox to 3.24in{ \@committeemembertwo\hfil}\hfil
\hbox to 3.24in{ Committee Member \hfil}%
\hbox to 3.24in{ Committee Member \hfil}}%\par
\ifx\@committeememberthree\@empty\else
\nobreak
\vspace{.5in minus.1in}
\vbox{
\extracommittee%
{\noindent \rule{3in}{.4pt}}%
{\hspace{.24in}\rule{3in}{.4pt}}\hfil\break
\extracommittee%
{\hbox to 3.24in{ \@committeememberthree\hfil}}%
{\hbox to 3.24in{ \@committeememberfour\hfil}}\hfil\break
\extracommittee%
{\hbox to 3.24in{ Committee Member \hfil}}%
{\hbox to 3.24in{ Committee Member \hfil}}\par
}\fi
\endgroup
\let\@title\@empty
\let\@author\@empty
\let\@degreename\@empty
%
\let\@bachelorinstitutionone\@empty
\let\@bachelordegreeone\@empty
\let\@bacheloryearone\@empty
\let\@bachelorinstitutiontwo\@empty
\let\@bachelordegreetwo\@empty
\let\@bacheloryeartwo\@empty
\let\@bachelorinstitutionthree\@empty
\let\@bachelordegreethree\@empty
\let\@bacheloryearthree\@empty
%
\let\@masterinstitutionone\@empty
\let\@masterinstitutiontwo\@empty
\let\@masterinstitutionthree\@empty
\let\@masteryearone\@empty
\let\@masteryeartwo\@empty
\let\@masteryearthree\@empty
\let\@masterdegreeone\@empty
\let\@masterdegreetwo\@empty
\let\@masterdegreethree\@empty
%
\let\@dissertationdirector\@empty
\let\@committeememberone\@empty
\let\@committeemembertwo\@empty
\let\@committeememberthree\@empty
\let\@committeememberfour\@empty
\let\@date\@empty
\clearpage
\doublespacing
}
\renewenvironment{abstract}%
{\clearpage
{\noindent \textbf{Abstract} \par}}
{\newpage}
\newenvironment{acknowledgements}%
{\clearpage
{\noindent \textbf{Acknowledgements} \par}}
{\newpage}
\newenvironment{dedication}%
{\clearpage}
{\newpage}
\renewcommand\tableofcontents{%
\clearpage
\section*{\contentsname
\@mkboth{%
\MakeUppercase\contentsname}{\MakeUppercase\contentsname}}%
\@starttoc{toc}%
}
\renewcommand\listoftables{%
\clearpage
\section*{\listtablename
\@mkboth{%
\MakeUppercase\listtablename}{\MakeUppercase\listtablename}}%
\@starttoc{lot}%
}
\renewcommand\listoffigures{%
\clearpage
\section*{\listfigurename
\@mkboth{%
\MakeUppercase\listfigurename}{\MakeUppercase\listfigurename}}%
\@starttoc{lof}%
}
\raggedright
\parindent 20pt
\endinput

View File

@ -256,7 +256,7 @@ Let $x \in \R^d$. Upon instantiation with $\rect$ we have that:
\end{align}
\textit{Case 2.ii:} Let $\nu = \lp \lp W_1,b_1 \rp, \lp W_2,b_2 \rp, ..., \lp W_L, b_L \rp \rp $. Deriving from Definitions \ref{7.2.1} and \ref{5.2.1} we have that:
\begin{align}
\id_d \bullet \nu =\lp \lp W_1,b_1\rp, \lp W_2, b_2 \rp,...,\lp W_{L-1},b_{L-1} \rp, \lp \begin{bmatrix}
&\id_d \bullet \nu \nonumber\\ &=\lp \lp W_1,b_1\rp, \lp W_2, b_2 \rp,...,\lp W_{L-1},b_{L-1} \rp, \lp \begin{bmatrix}
[W_L]_{1,*} \\
-[W_L]_{1,*}\\
\vdots \\
@ -284,7 +284,7 @@ This, along with Case 2.i implies that the uninstantiated last layer is equivale
\textit{Case 2.iii:} Let $\nu = \lp \lp W_1,b_1\rp \rp$. Deriving from Definitions \ref{7.2.1} and \ref{5.2.1} we have:
\begin{align}
&\nu \bullet \id_d \nonumber\\ &= \lp \lp \begin{bmatrix}
&\nu \bullet \id_d = \nonumber\\ &\lp \lp \begin{bmatrix}
\we_{\id_1,1} \\
&&\ddots \\
&&& \we_{\id_1,1}
@ -1030,7 +1030,7 @@ Given $x\in \R$, it is straightforward to find the maximum; $ x$ is the maximum.
Item (vi) is a straightforward consequence of Item (i). This completes the proof of the lemma.
\end{proof}
\subsection{The $\mathsf{MC}$ Neural Network and Approximations via Maximum Convolutions }
\subsection{The $\mathsf{MC}^{N,d}_{x,y}$ Neural Network and Approximations via Maximum Convolutions }
Let $f: [a,b] \rightarrow \R$ be a continuous bounded function with Lipschitz constant $L$. Let $x_0 \les x_1 \les \cdots \les x_N$ be a set of sample points within $[a,b]$, with it being possibly the case that for all $i \in \{0,1,\hdots, N\}$, $x_i \sim \unif([a,b])$. For all $i \in \{0,1,\hdots, N\}$, define a series of functions $f_0,f_1,\hdots, f_N: [a,b] \rightarrow \R$, as such:
\begin{align}

View File

@ -8,7 +8,7 @@ We will build up the tools necessary to approximate $e^x$ via neural networks in
\item The accuracy of our neural networks.
\end{enumerate}
\subsection{The squares of real numbers in $\lb 0,1 \rb$}
One of the most important operators we can
\begin{definition}[The $\mathfrak{i}_d$ Network]\label{def:mathfrak_i}
For all $d \in \N$ we will define the following set of neural networks as ``activation neural networks'' denoted $\mathfrak{i}_d$ as:
\begin{align}
@ -204,8 +204,8 @@ We will build up the tools necessary to approximate $e^x$ via neural networks in
and let $\Phi \in \neu$ be defined as:
\begin{align}
\Phi = \begin{cases}\label{def:Phi}
\lb \aff_{C_1,0}\bullet \mathfrak{i}_4\rb \bullet \aff_{\mymathbb{e}_4,B} & M=1 \\
\lb \aff_{C_M,0} \bullet \mathfrak{i}_4\rb\bullet \lb \aff_{A_{M-1},0} \bullet \mathfrak{i}_4 \rb \bullet \cdots \bullet \lb \aff_{A_1,B}\bullet \mathfrak{i}_4\rb \bullet \aff_{\mymathbb{e}_4,B} & M \in \lb 2,\infty \rp \cap \N
\lb \aff_{C_1,0}\bullet \mathfrak{i}_4\rb \bullet \aff_{\mymathbb{e}_4,B} & :M=1 \\
\lb \aff_{C_M,0} \bullet \mathfrak{i}_4\rb\bullet \lb \aff_{A_{M-1},0} \bullet \mathfrak{i}_4 \rb \bullet \cdots \bullet \lb \aff_{A_1,B}\bullet \mathfrak{i}_4\rb \bullet \aff_{\mymathbb{e}_4,B} &: M \in \lb 2,\infty \rp \cap \N
\end{cases}
\end{align}
it is then the case that:
@ -221,7 +221,7 @@ We will build up the tools necessary to approximate $e^x$ via neural networks in
\begin{proof}
Items (i)--(iii) are direct consequences of Lemma \ref{lem:6.1.1}, Items (i)--(iii). Note next the fact that $M = \min \left\{\N \cap \lb \frac{1}{2} \log_2 \lp \ve^{-1}\rp-1\rb,\infty\right\}$ ensures that:
\begin{align}
M = \min \left\{ \N \cap \lb \frac{1}{2}\log_2\lp \ve^{-1}\rp-1\rb, \infty\right\} \ges \min \left\{ \lb\max \left\{ 1,\frac{1}{2}\log_2 \lp\ve^{-1} \rp-1\right\},\infty \rb\right\} \ges \frac{1}{2}\log_2 \lp \ve^{-1}\rp-1
&M = \min \left\{ \N \cap \lb \frac{1}{2}\log_2\lp \ve^{-1}\rp-1\rb, \infty\right\}\\ &\ges \min \left\{ \lb\max \left\{ 1,\frac{1}{2}\log_2 \lp\ve^{-1} \rp-1\right\},\infty \rb\right\}\\ &\ges \frac{1}{2}\log_2 \lp \ve^{-1}\rp-1
\end{align}
This and Item (v) of Lemma \ref{lem:6.1.1} demonstrate that for all $x\in \lb 0,1\rb$ it then holds that:
\begin{align}
@ -250,7 +250,7 @@ We will build up the tools necessary to approximate $e^x$ via neural networks in
\end{remark}
Now that we have neural networks that perform the squaring operation inside $\lb -1,1\rb$, we may extend to all of $\R$. Note that this neural network representation differs somewhat from the ones in \cite{grohs2019spacetime}.
\subsection{The $\sqr$ network}
\subsection{The $\sqr^{q,\ve}$ network}
\begin{lemma}\label{6.0.3}\label{lem:sqr_network}
Let $\delta,\ve \in (0,\infty)$, $\alpha \in (0,\infty)$, $q\in (2,\infty)$, $ \Phi \in \neu$ satisfy that $\delta = 2^{\frac{-2}{q-2}}\ve ^{\frac{q}{q-2}}$, $\alpha = \lp \frac{\ve}{2}\rp^{\frac{1}{q-2}}$, $\real_{\rect}\lp\Phi\rp \in C\lp \R,\R\rp$, $\dep(\Phi) \les \max \left\{\frac{1}{2} \log_2(\delta^{-1})+1,2\right\}$, $\param(\Phi) \les \max\left\{10\log_2\lp \delta^{-1}\rp-7,13\right\}$, $\sup_{x \in \R \setminus [0,1]} \left| \lp \real_{\rect} \lp \Phi \rp \rp \lp x \rp -\rect(x) \right| =0$, and $\sup_{x\in \lb 0,1\rb} |x^2-\lp \real_{\rect} \lp \Phi \rp \rp \lp x\rp | \les \delta$, let $\Psi \in \neu$ be the neural network given by:
\begin{align}
@ -304,7 +304,7 @@ Now that we have neural networks that perform the squaring operation inside $\lb
&= \lp \frac{\ve}{2}+ \frac{\ve}{2} \rp \left| x \right|^q = \ve \left| x \right|^q \les \ve \max \left\{ 1, \left| x \right|^q \right\}
\end{align}
Note that (\ref{6.0.24}), (\ref{6.0.21}) and the fact that $\delta = 2^{\frac{-2}{q-2}}\ve^{\frac{q}{q-2}}$ then tell for all $x \in \lb -\lp \frac{\ve}{2} \rp ^{\frac{-1}{q-2}}, \lp \frac{\ve}{2} \rp ^{\frac{-1}{q-2}} \rb$ it holds that:
Note that (\ref{6.0.24}), (\ref{6.0.21}), and the fact that $\delta = 2^{\frac{-2}{q-2}}\ve^{\frac{q}{q-2}}$ then tell us that for all $x \in$ \\ $\lb -\lp \frac{\ve}{2} \rp ^{\frac{-1}{q-2}}, \lp \frac{\ve}{2} \rp ^{\frac{-1}{q-2}} \rb$ it holds that:
\begin{equation}
\begin{aligned}\label{6.0.26}
% &\left| x^2-\lp \real_{\rect} \lp \Phi \rp \rp \lp x \rp \right| \\
@ -379,6 +379,13 @@ This, and the fact that $\delta = 2^{\frac{-2}{q-2}}\ve ^{\frac{q}{q-2}}$ render
\caption{Left: $\log_{10}$ of depths for a simulation with $q \in \lb 2.1, 4 \rb $, $\ve \in \lp 0.1, 2 \rb$, and $x \in \lb -5,5 \rb$, all with $50$ mesh-points. Right: The theoretical upper limits over the same range of values}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Sqr_properties/experimental_params.png}
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Sqr_properties/param_theoretical_upper_limits.png}
\caption{Left: $\log_{10}$ of params for a simulation with $q \in \lb 2.1, 4 \rb $, $\ve \in \lp 0.1, 2 \rb$, and $x \in \lb -5,5 \rb$, all with $50$ mesh-points. Right: The theoretical upper limits over the same range of values}
\end{figure}
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
@ -388,13 +395,13 @@ This, and the fact that $\delta = 2^{\frac{-2}{q-2}}\ve ^{\frac{q}{q-2}}$ render
& Min. & 1\textsuperscript{st} Qu. & Median & Mean & 3\textsuperscript{rd} Qu. & Max. \\ \midrule
Experimental $|x^2 - \real_{\rect}(\mathsf{Sqr}^{q,\ve})(x)|$ & 0.00000 & 0.08943 & 0.33787 & 3.14893 & 4.67465 & 20.00 \\ \midrule
Theoretical $|x^2 - \real_{\rect}(\mathsf{Sqr}^{q,\ve})(x)|$ & 0.010 & 1.715 & 10.402 & 48.063 & 45.538 & 1250.00 \\ \midrule
Difference & 0.001 & 1.6012 & 9.8655 & 44.9141 & 40.7102 & 1230
Forward Difference & 0.01 & 1.6012 & 9.8655 & 44.9141 & 40.7102 & 1230
\end{tabular}
\caption{Theoretical upper bounds for $L^1$ error, experimental $L^1$ error and their forward difference, with $q \in \lb 2.1, 4 \rb $, $\ve \in \lp 0.1, 2 \rb$, and $x \in \lb -5,5 \rb$, all with $50$ mesh-points.}
\end{table}
\subsection{The $\prd$ network}
\subsection{The $\prd^{q,\ve}$ network}
We are finally ready to give neural network representations of arbitrary products of real numbers. However, this representation differs somewhat from those found in the literature, especially \cite{grohs2019spacetime}, where parallelization (stacking) is used instead of neural network sums. This will help us calculate $\wid_1$ and the width of the second-to-last layer.
\begin{lemma}\label{prd_network}
Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \rp$, $A_1,A_2,A_3 \in \R^{1\times 2}$, $\Psi \in \neu$ satisfy for all $x\in \R$ that $\delta = \ve \lp 2^{q-1} +1\rp^{-1}$, $A_1 = \lb 1 \quad 1 \rb$, $A_2 = \lb 1 \quad 0 \rb$, $A_3 = \lb 0 \quad 1 \rb$, $\real_{\rect} \in C\lp \R, \R \rp$, $\lp \real_{\rect} \lp \Psi \rp \rp \lp 0\rp = 0$, $0\les \lp \real_{\rect} \lp \Psi \rp \rp \lp x \rp \les \delta+|x|^2$, $|x^2-\lp \real_{\rect}\lp \Psi \rp \rp \lp x \rp |\les \delta \max \{1,|x|^q\}$, $\dep\lp \Psi \rp \les \max\{ 1+\frac{1}{q-2}+\frac{q}{2(q-2)}\log_2 \lp \delta^{-1} \rp ,2\}$, and $\param \lp \Psi \rp \les \max\left\{\lb \frac{40q}{q-2} \rb \log_2\lp \delta^{-1} \rp +\frac{80}{q-2}-28,52\right\}$, then:
@ -558,7 +565,7 @@ Observe next that for $q\in \lp 0,\infty\rp$, $\ve \in \lp 0,\infty \rp$, $\Gamm
\begin{remark}
Diagrammatically, this can be represented as:
\end{remark}
\begin{figure}
\begin{figure}[h]
\begin{center}
\tikzset{every picture/.style={line width=0.75pt}} %set default line width to 0.75pt
@ -614,11 +621,51 @@ Observe next that for $q\in \lp 0,\infty\rp$, $\ve \in \lp 0,\infty \rp$, $\Gamm
\end{tikzpicture}
\end{center}
\caption{A neural network diagram of the $\sqr$. }
\caption{Neural network diagram of the $\prd^{q,\ve}$ network.}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Prd_properties/experimental_deps.png}
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Prd_properties/dep_theoretical_upper_limits.png}
\caption{Left: $\log_{10}$ of depths for a simulation of $\prd^{q,\ve}$ with $q \in \lb 2.1, 4 \rb $, $\ve \in \lp 0.1, 2 \rb$, and $x \in \lb -5,5 \rb$, all with $50$ mesh-points. Right: The theoretical upper limits over the same range of values.}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Prd_properties/experimental_params.png}
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Prd_properties/param_theoretical_upper_limits.png}
\caption{Left: $\log_{10}$ of parameter counts for a simulation of $\prd^{q,\ve}$ with $q \in \lb 2.1, 4 \rb $, $\ve \in \lp 0.1, 2 \rb$, and $x \in \lb -5,5 \rb$, all with $50$ mesh-points. Right: The theoretical upper limits over the same range of values.}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width = \linewidth]{/Users/shakilrafi/R-simulations/Sqr_properties/iso.png}
\caption{Isosurface plot showing $|x^2 - \inst_{\rect}\lp \sqr^{q,\ve}\rp\lp x\rp|$ for $q \in [2.1,4]$, $\ve \in [0.01,2]$, and $x \in [-5,5]$ with 50 mesh-points in each.}
\end{figure}
\begin{table}[h]
\begin{tabular}{l|llllll}
\hline
& Min. & 1\textsuperscript{st} Qu. & Median & Mean & 3\textsuperscript{rd} Qu. & Max. \\ \hline
Experimental \\ $|x^2 - \inst_{\rect}\lp \sqr^{q,\ve}\rp(x)|$ & 0.0000 & 0.0894 & 0.3378 & 3.1489 & 4.6746 & 20.0000 \\ \hline
Theoretical upper limits for\\ $|x^2 - \inst_{\rect}\lp \sqr^{q,\ve}\rp(x)|$ & 0.010 & 1.715 & 10.402 & 48.063 & 45.538 & 1250.000 \\ \hline
\textbf{Forward Difference} & 0.001 & 1.6012 & 9.8655 & 44.9141 & 40.7102 & 1230 \\ \hline
Experimental depths & 2 & 2 & 2 & 2.307 & 2 & 80 \\ \hline
Theoretical upper bound on\\ depths & 2 & 2 & 2 & 2.73 & 2 & 91 \\ \hline
\textbf{Forward Difference} & 0 & 0 & 0 & 0.423 & 0 & 11 \\ \hline
Experimental params & 25 & 25 & 25 & 47.07 & 25 & 5641 \\ \hline
Theoretical upper limit on \\ params & 52 & 52 & 52 & 82.22 & 52 & 6353 \\ \hline
\textbf{Forward Difference} & 27 & 27 & 27 & 35.16 & 27 & 712 \\ \hline
\end{tabular}
\caption{Table showing the experimental and theoretical $1$-norm difference, depths, and parameter counts respectively for $\sqr^{q,\ve}$ with $q\in [2.1,4]$, $\ve \in [0.01,2]$, and $x \in [-5,5]$ all with $50$ mesh-points, and their forward differences.}
\end{table}
\section{Higher Approximations}\label{sec_tun}
We take inspiration from the $\sm$ neural network to create the $\prd$ neural network. However, we first need to define a special neural network, called the \textit{tunneling neural network}, to effectively stack two neural networks of unequal depth.
\subsection{The $\tun$ Neural Networks and Their Properties}
\subsection{The $\tun^d_n$ Neural Networks and Their Properties}
\begin{definition}[R\textemdash,2023, The Tunneling Neural Networks]\label{def:tun}
We define the tunneling neural network, denoted as $\tun_n$ for $n\in \N$ by:
\begin{align}
@ -722,8 +769,8 @@ We take inspiration from the $\sm$ neural network to create the $\prd$ neural ne
\end{align}
Now assume that for all $n\les N\in \N$, it is the case that $\param\lp \tun_n \rp = 7+6(n-2)$. For the inductive step, we then have:
\begin{align}
&\param \lp \tun_{n+1} \rp = \param \lp \tun_n \bullet \id_1 \rp \nonumber\\
&=\param \lb \lp \lp \begin{bmatrix}
&\param \lp \tun_{n+1} \rp = \param \lp \tun_n \bullet \id_1 \rp =\nonumber\\
&\param \lb \lp \lp \begin{bmatrix}
1 \\ -1
\end{bmatrix}, \begin{bmatrix}
0 \\ 0
@ -735,8 +782,8 @@ We take inspiration from the $\sm$ neural network to create the $\prd$ neural ne
1 & -1
\end{bmatrix}, \begin{bmatrix}
0
\end{bmatrix}\rp \rp \bullet \id_1 \rb \nonumber \\
&= \param \lb \lp \lp \begin{bmatrix}
\end{bmatrix}\rp \rp \bullet \id_1 \rb =\nonumber \\
&\param \lb \lp \lp \begin{bmatrix}
1 \\ -1
\end{bmatrix}, \begin{bmatrix}
0 \\ 0
@ -807,8 +854,8 @@ We take inspiration from the $\sm$ neural network to create the $\prd$ neural ne
1 & -1\\ & &\ddots \\ & & & 1 & -1
\end{bmatrix}, \begin{bmatrix}
0 \\ \vdots \\ 0
\end{bmatrix}\rp \rp \rb \nonumber \\
&= \param \lb \lp \lp \begin{bmatrix}
\end{bmatrix}\rp \rp \rb =\nonumber \\
&\param \lb \lp \lp \begin{bmatrix}
1 \\ -1 \\ & \ddots \\ & & 1 \\ & &-1
\end{bmatrix}, \begin{bmatrix}
0 \\ 0 \\\vdots \\ 0\\0
@ -816,7 +863,7 @@ We take inspiration from the $\sm$ neural network to create the $\prd$ neural ne
1 & -1 \\ -1 & 1 \\ & & \ddots \\ & & & 1 & -1 \\ & & & -1 & 1
\end{bmatrix}, \begin{bmatrix}
0 \\ 0 \\ \vdots \\ 0 \\ 0
\end{bmatrix}\rp, \lp \begin{bmatrix}
\end{bmatrix}\rp, \right. \right.\\ & \left.\left. \lp \begin{bmatrix}
1 &-1 \\ & &\ddots \\ & & & 1 & -1
\end{bmatrix},\begin{bmatrix}
0 \\ \vdots \\ 0
@ -829,7 +876,7 @@ We take inspiration from the $\sm$ neural network to create the $\prd$ neural ne
\begin{align}
& \param\lp \tun^d_{n+1}\rp = \param \lp \tun_n^d \bullet \id_d\rp \nonumber \\
& = \param \lb \lp \begin{bmatrix}
= &\param \lb \lp \begin{bmatrix}
1 \\ -1 \\ & \ddots \\ & & 1 \\ & &-1
\end{bmatrix}, \begin{bmatrix}
0 \\ 0 \\ \vdots \\ 0 \\ 0
@ -837,13 +884,12 @@ We take inspiration from the $\sm$ neural network to create the $\prd$ neural ne
1 & -1 \\ -1 & 1 \\ & \ddots \\ & & 1 & -1 \\ & & -1 & 1
\end{bmatrix}, \begin{bmatrix}
0 \\ 0 \\ \vdots \\ 0 \\ 0
\end{bmatrix} \rp, \hdots, \lp \begin{bmatrix}
\end{bmatrix} \rp, \hdots, \right. \\ &\left. \lp \begin{bmatrix}
1 &-1 \\ & \ddots \\ & & 1 & -1
\end{bmatrix}, \begin{bmatrix}
0 \\ \vdots \\ 0
\end{bmatrix}\rp \right. \nonumber \\
& \left. \bullet \id_d \rb \nonumber\\
& = \param \lb \lp \begin{bmatrix}
\end{bmatrix}\rp \bullet \id_d \rb \nonumber\\
= &\param \lb \lp \begin{bmatrix}
1 \\ -1 \\ & \ddots \\ & & 1 \\ & &-1
\end{bmatrix}, \begin{bmatrix}
0 \\ 0 \\ \vdots \\ 0 \\ 0
@ -851,11 +897,11 @@ We take inspiration from the $\sm$ neural network to create the $\prd$ neural ne
1 & -1 \\ -1 & 1 \\ & \ddots \\ & & 1 & -1 \\ & & -1 & 1
\end{bmatrix}, \begin{bmatrix}
0 \\ 0 \\ \vdots \\ 0 \\ 0
\end{bmatrix} \rp, \hdots, \lp \begin{bmatrix}
\end{bmatrix} \rp, \hdots, \right.\\ &\left.\lp \begin{bmatrix}
1 & -1 \\ -1 & 1 \\ & \ddots \\ & & 1 & -1 \\ & & -1 & 1
\end{bmatrix}, \begin{bmatrix}
0 \\ 0 \\ \vdots \\ 0 \\ 0
\end{bmatrix} \rp, \right. \nonumber\\ &\left. \lp \begin{bmatrix}
\end{bmatrix} \rp, \lp \begin{bmatrix}
1 &-1 \\ & \ddots \\ & & 1 & -1
\end{bmatrix}, \begin{bmatrix}
0 \\ \vdots \\ 0
@ -872,7 +918,7 @@ This proves Item (iv). Finally, Item (v) is a consequence of Lemma \ref{5.3.2}
\subsection{The $\pwr$ Neural Networks and Their Properties}
\subsection{The $\pwr_n^{q,\ve}$ Neural Networks and Their Properties}
\begin{definition}[R\textemdash, 2023, The Power Neural Network]\label{def:pwr}
Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \rp$ satisfy that $\delta = \ve \lp 2^{q-1} +1\rp^{-1}$. For $n\in \N_0$ we define the power neural networks $\pwr_n^{q,\ve} \in \neu$ as:
@ -1014,7 +1060,7 @@ Let $\mathfrak{p}_i$ for $i \in \{1,2,...\}$ be the set of functions defined for
\end{align}
This, together with the fact that the composition of continuous functions is continuous and the fact that the stacking of continuous instantiated neural networks is continuous, tells us that $\real_{\rect} \lp \pwr^{q,\ve}_n \rp \in C \lp \R, \R \rp$ for $n \in \N \cap \lb 2,\infty \rp$. This establishes Item (i).
Note next that by observation $\dep \lp \pwr_0^{q,\ve} \rp=1$ and by Item (iv) of Lemma \ref{idprop}, it is the case that $\dep\lp \id_1 \rp = 2$. By Lemmas $\ref{dep_cpy}$ and $\ref{depthofcomposition}$ it is also the case that: $\dep\lp \prd^{q,\ve} \bullet \lb \tun_{\dep(\pwr^{q,\ve}_{n-1})} \boxminus \pwr^{q,\ve}_{n-1} \rb \bullet \cpy \rp = \dep \lp \prd^{q,\ve} \bullet \lb \tun_{\dep(\pwr^{q,\ve}_{n-1})} \boxminus \pwr^{q,\ve}_{n-1} \rb\rp $. Note also that by Lemma we have that $\dep \lp \tun_{\dep \lp \pwr^{q,\ve}_{n-1}\rp} \boxminus \pwr^{q,\ve}_{n-1}\rp = \dep \lp \pwr^{q,\ve}_{n-1} \rp$.
Note next that by observation $\dep \lp \pwr_0^{q,\ve} \rp=1$ and by Item (iv) of Lemma \ref{idprop}, it is the case that $\dep\lp \id_1 \rp = 2$. By Lemmas $\ref{dep_cpy}$ and $\ref{depthofcomposition}$ it is also the case that\\ $\dep\lp \prd^{q,\ve} \bullet \lb \tun_{\dep(\pwr^{q,\ve}_{n-1})} \boxminus \pwr^{q,\ve}_{n-1} \rb \bullet \cpy \rp = \dep \lp \prd^{q,\ve} \bullet \lb \tun_{\dep(\pwr^{q,\ve}_{n-1})} \boxminus \pwr^{q,\ve}_{n-1} \rb\rp $. Note also that by Lemma we have that $\dep \lp \tun_{\dep \lp \pwr^{q,\ve}_{n-1}\rp} \boxminus \pwr^{q,\ve}_{n-1}\rp = \dep \lp \pwr^{q,\ve}_{n-1} \rp$.
This with Lemma \ref{comp_prop} then yields for $n \in \N$ that:
\begin{align}
\dep \lp \pwr^{q,\ve}_n \rp &= \dep \lp \prd \bullet \lb \tun_{\mathcal{D} \lp \pwr^{q,\ve}_{n-1} \rp } \boxminus \pwr^{q,\ve}_{n-1} \rb \bullet \cpy_{2,1} \rp \nonumber \\
@ -1174,6 +1220,28 @@ Let $\mathfrak{p}_i$ for $i \in \{1,2,...\}$ be the set of functions defined for
\begin{remark}\label{rem:pwr_gets_deeper}
Note that each power network $\pwr_n^{q,\ve}$ is at least as deep and as parameter-rich as the previous power network $\pwr_{n-1}^{q,\ve}$; each differs from the next by one $\prd^{q, \ve}$ network.
\end{remark}
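At the level of instantiated functions, the recursion underlying $\pwr_n^{q,\ve}$ is simply $x^n = x \cdot x^{n-1}$ with $x^0 \coloneqq 1$. The following \texttt{R} sketch shows this recursion with exact multiplication, i.e. without the $\prd^{q,\ve}$ approximation error; it is purely illustrative of the structure and is not an instantiation of the networks themselves.
\begin{lstlisting}[language = R, style = rstyle]
# Function-level sketch of the recursion behind pwr_n:
# x^n = x * x^(n-1), with x^0 := 1, mirroring the composition
# prd . [tun stacked with pwr_{n-1}] . cpy under instantiation.
pwr_fn <- function(x, n) {
  if (n == 0) rep(1, length(x)) else x * pwr_fn(x, n - 1)
}
pwr_fn(c(-2, 0.5, 3), 3)   # -8.000  0.125  27.000
\end{lstlisting}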
\begin{figure}[h]
\centering
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Pwr_3_properties/experimental_deps.png}
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Pwr_3_properties/dep_theoretical_upper_limits.png}
\caption{Left: $\log_{10}$ of depths for a simulation of $\pwr_3^{q,\ve}$ with $q \in \lb 2.1, 4 \rb $, $\ve \in \lp 0.1, 2 \rb$, and $x \in \lb -5,5 \rb$, all with $50$ mesh-points. Right: The theoretical upper limits over the same range of values.}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Pwr_3_properties/experimental_params.png}
\includegraphics[width = 0.45\linewidth]{/Users/shakilrafi/R-simulations/Pwr_3_properties/param_theoretical_upper_limits.png}
\caption{Left: $\log_{10}$ of parameter counts for a simulation of $\pwr_3^{q,\ve}$ with $q \in \lb 2.1, 4 \rb $, $\ve \in \lp 0.1, 2 \rb$, and $x \in \lb -5,5 \rb$, all with $50$ mesh-points. Right: The theoretical upper limits over the same range of values.}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width = \linewidth]{/Users/shakilrafi/R-simulations/Pwr_3_properties/isosurface.png}
\caption{Isosurface plot showing $|x^3 - \inst_{\rect}\lp \pwr^{q,\ve}_3\rp\lp x\rp|$ for $q \in [2.1,4]$, $\ve \in [0.01,2]$, and $x \in [-5,5]$ with 50 mesh-points in each.}
\end{figure}
\subsection{$\pnm_{n,C}^{q,\ve}$ and Neural Network Polynomials}
\begin{definition}[Neural Network Polynomials]
@ -1403,7 +1471,7 @@ Once we have neural network polynomials, we may take the next leap to transcende
\end{align}
\end{proof}
\begin{lemma}\label{6.2.8}
Let $n\in \N$. Let $\nu_1,\nu_2,...,\nu_n \in \neu$, $\ve_1,\ve_2,...,\ve_n \in \lp 0,\infty \rp$ and $f_1,f_2,...,f_n \in C\lp \R, \R \rp$ such that for all $i \in \{1,2,...,n\}$, and for all $x\in \R$, it is the case that, $\left| f_i\lp x \rp - \real_{\rect} \lp \nu_i \rp\lp x \rp \right| \les \ve_i$. It is then the case for all $x\in \R$, that:
Let $n\in \N$. Let $\nu_1,\nu_2,...,\nu_n \in \neu$, $\ve_1,\ve_2,...,\ve_n \in \lp 0,\infty \rp$ and $f_1,f_2,...,f_n \in C\lp \R, \R \rp$ such that for all $i \in \{1,2,...,n\}$, and for all $x\in \R$, it is the case that,\\ $\left| f_i\lp x \rp - \real_{\rect} \lp \nu_i \rp\lp x \rp \right| \les \ve_i$. It is then the case for all $x\in \R$, that:
\begin{align}
\left| \sum^n_{i=1} f_i \lp x \rp -\bigoplus^n_{i=1} \lp \real_{\rect}\lp \nu_i \rp \rp \lp x\rp\right| \les \sum_{i=1}^n \ve_i
\end{align}
@ -1625,12 +1693,12 @@ Once we have neural network polynomials, we may take the next leap to transcende
&\left| \sin\lp x\rp - \inst_{\rect}\lp \sne_n^{q,\ve}\rp\lp x\rp\right| \nonumber\\
&= \left| \cos \lp x - \frac{\pi}{2}\rp - \inst_{\rect}\lp \csn_n^{q,\ve}\bullet \aff_{1,-\frac{\pi}{2}}\rp\lp x\rp\right| \nonumber\\
&=\left| \cos \lp x-\frac{\pi}{2}\rp - \inst_{\rect}\lp\csn_n^{q,\ve}\rp\lp x-\frac{\pi}{2} \rp\right| \nonumber \\
&\les \sum^n_{i=1} \left| \frac{\lp -1\rp^i}{2i!}\right|\lp \left| \lp x -\frac{\pi}{2}\rp\lp \lp x -\frac{\pi}{2}\rp^{2i-1} - \real_{\rect}\lp \pwr^{q,\ve}_{i-1}\rp\lp x-\frac{\pi}{2}\rp\rp\right| + \ve + |x|^q + \mathfrak{p}_{i-1}^q \rp+ \frac{|x|^{n+1}}{(n+1)!}\nonumber
&\les \sum^n_{i=1} \left| \frac{\lp -1\rp^i}{2i!}\right|\lp \left| \lp x -\frac{\pi}{2}\rp\lp \lp x -\frac{\pi}{2}\rp^{2i-1} - \real_{\rect}\lp \pwr^{q,\ve}_{i-1}\rp\lp x-\frac{\pi}{2}\rp\rp\right| + \ve + |x|^q + \mathfrak{p}_{i-1}^q \rp \nonumber\\&+ \frac{|x|^{n+1}}{(n+1)!}\nonumber
\end{align}
\end{proof}
\begin{remark}\label{rem:pyth_idt}
Note that under these neural network architectures the famous Pythagorean identity $\sin^2\lp x\rp + \cos^2 \lp x\rp = 1$, may be rendered approximately, for fixed $n,q,\ve$ as: $\lb \sqr^{q,\ve}\bullet \csn^{q,\ve}_n \rb \oplus\lb \sqr^{q,\ve}\bullet \sne^{q,\ve}_n\rb$. A full discussion of the associated parameter, depth, and accuracy bounds are beyond the scope of this dissertation, and may be appropriate for future work.
Note that under these neural network architectures the famous Pythagorean identity $\sin^2\lp x\rp + \cos^2 \lp x\rp = 1$ may be rendered approximately, for fixed $n,q,\ve$, as:\\ $\lb \sqr^{q,\ve}\bullet \csn^{q,\ve}_n \rb \oplus\lb \sqr^{q,\ve}\bullet \sne^{q,\ve}_n\rb$. A full discussion of the associated parameter, depth, and accuracy bounds is beyond the scope of this dissertation, and may be appropriate for future work.
\end{remark}
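As a purely function-level sanity check of this remark, one may replace the instantiated $\csn_n^{q,\ve}$ and $\sne_n^{q,\ve}$ networks by the truncated Taylor polynomials they are built around, and replace $\sqr^{q,\ve}$ by exact squaring; the following \texttt{R} sketch, made under those simplifications, indicates how close the rendered identity is to $1$ on a bounded interval.
\begin{lstlisting}[language = R, style = rstyle]
# Sketch: Pythagorean identity with truncated Taylor approximants
# standing in for sne_n and csn_n, and exact squaring for sqr.
taylor_sin <- function(x, n) {
  k <- 0:n
  rowSums(outer(x, k, function(x, k) (-1)^k * x^(2 * k + 1) / factorial(2 * k + 1)))
}
taylor_cos <- function(x, n) {
  k <- 0:n
  rowSums(outer(x, k, function(x, k) (-1)^k * x^(2 * k) / factorial(2 * k)))
}

x   <- seq(-pi, pi, length.out = 101)
err <- abs(taylor_sin(x, 5)^2 + taylor_cos(x, 5)^2 - 1)
max(err)   # small on [-pi, pi] for n = 5
\end{lstlisting}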


@ -1,6 +1,6 @@
\chapter{ANN representations of Brownian Motion Monte Carlo}
\textbf{This is tentative without any reference to $f$.}
%\textbf{This is tentative without any reference to $f$.}
We will now take the modified and simplified version of Multi-level Picard introduced in Chapter \ref{chp:2} and show a neural network representation of it, together with the associated parameter, depth, and accuracy bounds. We will also take a somewhat different approach, in that we will give a direct neural network representation of the expectation of the stochastic process that Feynman-Kac asserts in Lemma \ref{ues}, and to build up to it we must build the requisite technology in Lemma \ref{mathsfE}, Lemma \ref{UE-prop}, and Lemma \ref{UEX}.
\begin{lemma}[R\textemdash,2023]
Let $d,M \in \N$, $T \in (0,\infty)$, $\act \in C(\R,\R)$, $\mathsf{G}_d \in \neu$ satisfy that $\real_{\act} \lp \mathsf{G}_d \rp \in C \lp \R^d, \R \rp$, for every $\theta \in \Theta$, let $\mathcal{U}^\theta: [0,T] \rightarrow [0,T]$ and $\mathcal{W}^\theta:[0,T] \rightarrow \R^d$ be functions, and for every $\theta \in \Theta$, let $U^\theta: [0,T] \times \R^d \rightarrow \R$ satisfy for all $t \in [0,T]$, $x \in \R^d$ that:
\begin{align}
@ -31,7 +31,7 @@
\begin{align}
\mathsf{P}^\theta_t = \bigoplus^M_{k=1} \lb \frac{1}{M} \triangleright \lp \mathsf{G}_d \bullet \aff_{\mathbb{I}_d, \mathcal{W}^{\theta,0,-k}_{T-t}} \rp \rb
\end{align}
Note the hypothesis that for all $\theta \in \Theta$, $t \in [0,T]$ it holds that $\mathcal{W}^\theta_t \in \R^d$ and Lemma \ref{5.6.5} applied for every $\theta \in \Theta$ $t \in [0,T]$ with $v \curvearrowleft M$, $ c_{i \in \{u,u+1,...,v\}} \curvearrowleft \lp \frac{1}{M} \rp_{i \in \{u,u+1,...,v\}}$, $\lp B_i \rp _{i \in \{u,u+1,...,v\}} \curvearrowleft \lp \mathcal{W}^{\lp \theta, 0 , -k \rp }_{T-t} \rp_{k \in \{1,2,...,M\}}$, $\lp \nu_i \rp_{i \in \{u,u+1,...,v\}} \curvearrowleft \lp \mathsf{G}_d \rp _{i \in \{u,u+1,...,v\}}$, $\mu \curvearrowleft \Phi^\theta_t$ and with the notation of Lemma \ref{5.6.5} tells us that for all $\theta \in \Theta$, $t \in [0,T]$, and $x \in \R^d$ it holds that: La lala
Note the hypothesis that for all $\theta \in \Theta$, $t \in [0,T]$ it holds that $\mathcal{W}^\theta_t \in \R^d$ and Lemma \ref{5.6.5} applied for every $\theta \in \Theta$ $t \in [0,T]$ with $v \curvearrowleft M$, $ c_{i \in \{u,u+1,...,v\}} \curvearrowleft \lp \frac{1}{M} \rp_{i \in \{u,u+1,...,v\}}$, $\lp B_i \rp _{i \in \{u,u+1,...,v\}} \curvearrowleft \lp \mathcal{W}^{\lp \theta, 0 , -k \rp }_{T-t} \rp_{k \in \{1,2,...,M\}}$, $\lp \nu_i \rp_{i \in \{u,u+1,...,v\}} \curvearrowleft \lp \mathsf{G}_d \rp _{i \in \{u,u+1,...,v\}}$, $\mu \curvearrowleft \Phi^\theta_t$ and with the notation of Lemma \ref{5.6.5} tells us that for all $\theta \in \Theta$, $t \in [0,T]$, and $x \in \R^d$ it holds that:
\begin{align}\label{8.0.6}
\lay \lp \mathsf{P}^\theta_t \rp = \lp d, M \wid_1 \lp \G \rp, M\wid_2 \lp \G \rp,...,M\wid_{\dep\lp \G \rp -1}\lp \G \rp ,1\rp = \lay \lp \sP^0_0 \rp \in \N^{\dep \lp \G \rp +1}
\end{align}
@ -503,8 +503,8 @@ Let $n, N,h\in \N$. Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \
&= 3\ve +2\ve \left| \mathfrak{u}_d\lp x\rp\right|^q+2\ve \left| \exp \lp \int^b_afdx\rp\right|^q + \ve \left| \exp \lp \int^b_afdx\rp - \mathfrak{e}\right|^q -\mathfrak{e}\mathfrak{u}_d\lp x \rp \nonumber
\end{align}
\end{proof}
\section{The $\mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}$ network}\label{UEX}
\begin{lemma}[R\textemdash,2023]\label{UE-prop}
\section{The $\mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}$ network}
\begin{lemma}[R\textemdash,2023]\label{UEX}
Let $n, N,h\in \N$. Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \rp$, satisfy that $\delta = \ve \lp 2^{q-1} +1\rp^{-1}$. Let $a\in \lp -\infty,\infty \rp$, $b \in \lb a, \infty \rp$. Let $f:[a,b] \rightarrow \R$ be continuous and have second derivatives almost everywhere in $\lb a,b \rb$. Let $a=x_0 \les x_1\les \cdots \les x_{N-1} \les x_N=b$ such that for all $i \in \{0,1,...,N\}$ it is the case that $h = \frac{b-a}{N}$, and $x_i = x_0+i\cdot h$ . Let $x = \lb x_0 \: x_1\: \cdots \: x_N \rb$ and as such let $f\lp\lb x \rb_{*,*} \rp = \lb f(x_0) \: f(x_1)\: \cdots \: f(x_N) \rb$. Let $\mathsf{E}^{\exp}_{n,h,q,\ve} \in \neu$ be the neural network given by:
\begin{align}
@ -627,7 +627,7 @@ Note that for a fixed $T \in \lp 0,\infty \rp$ it is the case that $u_d\lp t,x \
\begin{center}
\tikzset{every picture/.style={line width=0.75pt}} %set default line width to 0.75pt
\begin{tikzpicture}[x=0.75pt,y=0.75pt,yscale=-1,xscale=1]
\begin{tikzpicture}[x=0.75pt,y=0.75pt,yscale=-1,xscale=0.9]
%uncomment if require: \path (0,475); %set diagram left start at 0, and has a height of 475
%Shape: Rectangle [id:dp5014556157804896]
@ -1050,7 +1050,7 @@ Let $t \in \lp 0,\infty\rp$ and $T \in \lp t,\infty\rp$. Let $\lp \Omega, \mathc
\tikzset{every picture/.style={line width=0.75pt}} %set default line width to 0.75pt
\begin{tikzpicture}[x=0.75pt,y=0.75pt,yscale=-0.9,xscale=0.9]
\begin{tikzpicture}[x=0.75pt,y=0.75pt,yscale=-1,xscale=0.9]
%uncomment if require: \path (0,475); %set diagram left start at 0, and has a height of 475
%Shape: Rectangle [id:dp5014556157804896]
@ -1215,10 +1215,6 @@ Let $t \in \lp 0,\infty\rp$ and $T \in \lp t,\infty\rp$. Let $\lp \Omega, \mathc
\end{remark}
\begin{remark}
It may be helpful to think of this as a very crude form of ensembling.
\end{remark}


@ -51,19 +51,30 @@ Parts of this code have been released on \texttt{CRAN} under the package name \t
\lstinputlisting[language = R, style = rstyle, label = Sqr_properties, caption = {R code simulations involving $\sqr$}]{"/Users/shakilrafi/R-simulations/Sqr_properties.R"}
\lstinputlisting[language = R, style = rstyle, label = Pwr, caption = {R code simulations involving $\sqr$}]{"/Users/shakilrafi/R-simulations/Pwr.R"}
\lstinputlisting[language = R, style = rstyle, label = Pwr, caption = {R code for $\pwr^{q,\ve}$ networks}]{"/Users/shakilrafi/R-simulations/Pwr.R"}
\lstinputlisting[language = R, style = rstyle, label = Pwr_3_properties, caption = {R code simulations involving $\sqr$}]{"/Users/shakilrafi/R-simulations/Pwr_3_properties.R"}
\lstinputlisting[language = R, style = rstyle, label = Pwr_3_properties, caption = {R code simulations involving $\pwr_3^{q,\ve}$}]{"/Users/shakilrafi/R-simulations/Pwr_3_properties.R"}
\lstinputlisting[language = R, style = rstyle, label = Pwr_3_properties, caption = {R code simulations involving $\sqr$}]{"/Users/shakilrafi/R-simulations/Nrm.R"}
\lstinputlisting[language = R, style = rstyle, label = Nrm, caption = {R code simulations involving $\nrm^d_1$}]{"/Users/shakilrafi/R-simulations/Nrm.R"}
\lstinputlisting[language = R, style = rstyle, label = Pwr_3_properties, caption = {R code simulations involving $\sqr$}]{"/Users/shakilrafi/R-simulations/Mxm.R"}
\lstinputlisting[language = R, style = rstyle, label = Mxm, caption = {R code simulations involving $\mxm_d$}]{"/Users/shakilrafi/R-simulations/Mxm.R"}
\lstinputlisting[language = R, style = rstyle, label = Tay, caption = {R code simulations involving $\tay$}]{"/Users/shakilrafi/R-simulations/Tay.R"}
\lstinputlisting[language = R, style = rstyle, label = Etr, caption = {R code simulations involving $\etr$}]{"/Users/shakilrafi/R-simulations/Etr.R"}
\newpage
\begin{center}
\textbf{Vita}
\end{center}
The author was born on November 1\textsuperscript{st}, 1992 in the city of Dhaka in the heart of Bangladesh. He grew up in that large city with a childhood that included setting things on fire, and very occasionally focusing on mathematics. He failed to achieve his childhood goal of becoming an astronomer, however, when he entered college at Troy University in 2011 and realized it would involve cold nights outside, and so he chose mathematics instead. He has continued his pursuits in mathematics and is now a graduate student at the University of Arkansas trying to graduate.


Dissertation/commands.aux Normal file

@ -0,0 +1,60 @@
\relax
\providecommand\zref@newlabel[2]{}
\providecommand\hyper@newdestlabel[2]{}
\@setckpt{commands}{
\setcounter{page}{1}
\setcounter{equation}{0}
\setcounter{enumi}{0}
\setcounter{enumii}{0}
\setcounter{enumiii}{0}
\setcounter{enumiv}{0}
\setcounter{footnote}{0}
\setcounter{mpfootnote}{0}
\setcounter{part}{0}
\setcounter{chapter}{0}
\setcounter{section}{0}
\setcounter{subsection}{0}
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{0}
\setcounter{table}{0}
\setcounter{@pps}{0}
\setcounter{@ppsavesec}{0}
\setcounter{@ppsaveapp}{0}
\setcounter{AM@survey}{0}
\setcounter{parentequation}{0}
\setcounter{footdir@label}{0}
\setcounter{tmpA}{0}
\setcounter{tmpB}{0}
\setcounter{tmpC}{0}
\setcounter{tmpD}{0}
\setcounter{tmpE}{0}
\setcounter{tmpF}{0}
\setcounter{Hijriday}{0}
\setcounter{Hijrimonth}{0}
\setcounter{Hijriyear}{0}
\setcounter{subfigure}{0}
\setcounter{lofdepth}{1}
\setcounter{subtable}{0}
\setcounter{lotdepth}{1}
\setcounter{section@level}{0}
\setcounter{Item}{0}
\setcounter{Hfootnote}{0}
\setcounter{Hy@AnnotLevel}{0}
\setcounter{bookmark@seq@number}{0}
\setcounter{NAT@ctr}{0}
\setcounter{ALG@line}{0}
\setcounter{ALG@rem}{0}
\setcounter{ALG@nested}{0}
\setcounter{ALG@Lnr}{1}
\setcounter{ALG@blocknr}{1}
\setcounter{ALG@storecount}{0}
\setcounter{ALG@tmpcounter}{0}
\setcounter{@stackindex}{0}
\setcounter{ROWcellindex@}{0}
\setcounter{lstnumber}{1}
\setcounter{theorem}{0}
\setcounter{corollary}{0}
\setcounter{lstlisting}{0}
}


@ -4,55 +4,44 @@ We will present three avenues of further research and related work on parameter
\section{Further operations and further kinds of neural networks}
Note, for instance, that several classical operations are done on neural networks that have yet to be accounted for in this framework and talked about in the literature. We will discuss one of them \textit{dropout} provide lemmas that may be useful to future research.
Note, for instance, that several classical operations performed on neural networks have yet to be accounted for in this framework or discussed in the literature. We will discuss two of them, \textit{dropout} and \textit{merger}, and indicate how they may be brought into this framework.
\subsection{Dropout}
Overfitting presents an important challenge for all machine learning models, including deep learning. There exist several techniques for mitigating it, one of the most widely used being \textit{dropout}, which we now describe within this framework.
\begin{definition}[Hadamard Product]
Let $m,n \in \N$. Let $A,B \in \R^{m \times n}$. For all $i \in \{ 1,2,\hdots,m\}$ and $j \in \{ 1,2,\hdots,n\}$ define the Hadamard product $\odot: \R^{m\times n} \times \R^{m \times n} \rightarrow \R^{m \times n}$ as:
\begin{align}
\lb A \odot B \rb _{i,j} \coloneqq \lb A \rb_{i,j} \cdot \lb B \rb_{i,j}
\end{align}
\end{definition}
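In \texttt{R}, the product \texttt{A * B} of two equally sized matrices is already taken entrywise, so the definition above transcribes directly; the dimension check in the sketch below is illustrative and not part of the definition.
\begin{lstlisting}[language = R, style = rstyle]
# Hadamard (entrywise) product of two m x n matrices.
hadamard <- function(A, B) {
  stopifnot(all(dim(A) == dim(B)))  # both must be m x n
  A * B                             # `*` acts entrywise on matrices in R
}

A <- matrix(1:6, nrow = 2)
B <- matrix(6:1, nrow = 2)
hadamard(A, B)   # entry (i,j) equals [A]_{i,j} * [B]_{i,j}
\end{lstlisting}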
\begin{definition}[Scalar product of weights]
Let $\nu \in \neu$, $L\in \N$, $i,j \in \N$, and $c\in \R$. Assume also that $\lay \lp \nu\rp = \lp l_0,l_1,l_2,\hdots, l_L\rp$. Assume then that the neural network is given by $\nu = \lp \lp W_1,b_1\rp, \lp W_2,b_2\rp,\hdots, \lp W_L,b_L\rp\rp$. We will denote by $c\circledast^{i,j}\nu$ the neural network which, for $i \in \N \cap \lb 1,L-1\rb$, $j \in \N \cap \lb 1,l_i\rb$, is given by $c \circledast^{i,j} \nu = \lp \lp W_1,b_1 \rp, \lp W_2,b_2\rp, \hdots,\lp \tilde{W}_i,b_i \rp,\lp W_{i+1},b_{i+1}\rp,\hdots, \lp W_L,b_L\rp\rp$ where it is the case that:
\begin{align}
\tilde{W}_i = \lp \mymathbb{k}^{j,j,c-1}_{l_i,l_{i}} + \mathbb{I}_{l_i}\rp W_i
\end{align}
\end{definition}
\begin{definition}[The Dropout Operator]
Let $\nu \in \neu$, $L,n\in \N$, and $i_1,i_2,\hdots, i_n,j_1,j_2,\hdots,j_n \in \N$. Assume also that $\lay \lp \nu\rp = \lp l_0,l_1,l_2,\hdots, l_L\rp$. Assume then that the neural network is given by $\nu = \lp \lp W_1,b_1\rp, \lp W_2,b_2\rp,\hdots, \lp W_L,b_L\rp\rp$. We will denote by $\dropout_n^{\unif}\lp \nu \rp$ the neural network that is given by:
\begin{align}
0\circledast^{i_1,j_1} \lp 0 \circledast^{i_2,j_2}\lp \hdots 0\circledast^{i_n,j_n}\nu \hdots \rp\rp
\end{align}
Where for each $k \in \{1,2,\hdots,n \}$ it is the case that $i_k \sim \unif \{ 1,L-1\}$ and $j_k\sim \unif\{1,l_{i_k}\}$, independently.
\end{definition}
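A direct, illustrative transcription of the two preceding definitions into \texttt{R} is sketched below, assuming (hypothetically) that a network is stored as a list of layers, each a \texttt{list(W, b)} pair, as in the sketch at the end of this subsection. Scaling row $j$ of $W_i$ by $c$ realizes $c \circledast^{i,j} \nu$, and composing such operations with $c = 0$ at uniformly chosen positions realizes $\dropout_n^{\unif}\lp \nu \rp$.
\begin{lstlisting}[language = R, style = rstyle]
# Sketch of c circledast^{i,j} nu: left-multiply W_i by the diagonal
# matrix with c in position (j,j) and 1 elsewhere, i.e. scale row j.
scale_neuron <- function(nu, i, j, c) {
  nu[[i]]$W[j, ] <- c * nu[[i]]$W[j, ]
  nu
}

# Sketch of the dropout operator: apply 0 circledast^{i_k, j_k} at n
# uniformly chosen (layer, neuron) positions among the hidden layers.
dropout_unif <- function(nu, n) {
  L <- length(nu)
  for (k in seq_len(n)) {
    i  <- sample(seq_len(L - 1), 1)            # i_k ~ Unif{1, ..., L-1}
    j  <- sample(seq_len(nrow(nu[[i]]$W)), 1)  # j_k ~ Unif{1, ..., l_i}
    nu <- scale_neuron(nu, i, j, 0)
  }
  nu
}
\end{lstlisting}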
We will also define the dropout operator introduced in \cite{srivastava_dropout_2014}.
We will also define dropout at the level of realizations, as introduced in \cite{srivastava_dropout_2014} and explained further in \cite{Goodfellow-et-al-2016}.
\begin{definition}[Realization with dropout]
Let $\nu \in \neu$, $L,n \in \N$, $p \in \lp 0,1\rp$, $\lay \lp \nu\rp = \lp l_0,l_1,\hdots, \l_L\rp$, and that $\nu = \lp \lp W_1,b_1\rp, \lp W_2,b_2\rp, \hdots , \lp W_L,b_L\rp \rp$. Let it be the case that for each $n\in \N$, $\rho_n = \{ x_1,x_2,\hdots,x_n\} \in \R^n$ where for each $i \in \{1,2,\hdots,n\}$ it is the case that $x_i \sim \bern(p)$. We will then denote $\real_{\rect}^{D,p} \lp \nu \rp \in C\lp \R^{\inn\lp \nu\rp},\R^{\out\lp \nu \rp}\rp$, the continuous function given by:
Let $\nu \in \neu$, $L,n \in \N$, $p \in \lp 0,1\rp$, $\lay \lp \nu\rp = \lp l_0,l_1,\hdots, l_L\rp$, and let $\nu = \lp \lp W_1,b_1\rp, \lp W_2,b_2\rp, \hdots , \lp W_L,b_L\rp \rp$. Let it be the case that for each $n\in \N$, $\rho_n = \lp x_1,x_2,\hdots,x_n\rp \in \R^n$ where for each $i \in \{1,2,\hdots,n\}$ it is the case that $x_i \sim \bern(p)$, independently. We will then denote by $\real_{\act}^{D,p} \lp \nu \rp \in C\lp \R^{\inn\lp \nu\rp},\R^{\out\lp \nu \rp}\rp$ the continuous function given by:
\begin{align}
\real_{\rect}^{D,p}\lp \nu \rp = \rho_{l_L}\odot \rect \lp W_l\lp \rho_{l_{L-1}} \odot \rect \lp W_{L-1}\lp \hdots\rp + b_{L-1}\rp\rp + b_L\rp
\real_{\act}^{D,p}\lp \nu \rp = \rho_{l_L}\odot \act \lp W_L\lp \rho_{l_{L-1}} \odot \act \lp W_{L-1}\lp \hdots\rp + b_{L-1}\rp\rp + b_L\rp
\end{align}
\end{definition}
Dropout is an example of \textit{ensemble learning}, a form of learning where versions of our model (e.g. random forests or neural networks) are made (e.g. by dropout for neural networks or by enforcing a maximum depth to the trees in our forest), and a weighted average of the predictions of our different models is taken to be the predictive model. That such a model can work, and indeed work well is the subject of \cite{schapire_strength_1990}.
Dropout is an example of \textit{ensemble learning}, a form of learning where versions of our model (e.g. random forests or neural networks) are made (e.g. by dropout for neural networks or by enforcing a maximum depth to the trees in our forest), and a weighted average of the predictions of our different models is taken to be the predictive model. That such a model can work, and indeed work well, is the subject of \cite{schapire_strength_1990}.
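To make the last definition and the ensembling point concrete, the following \texttt{R} sketch runs a plain ReLU forward pass on a toy network (a hypothetical list-of-layers representation, not one of the architectures constructed in this dissertation), Hadamard-multiplies every layer's activation, including the output layer as in the display above, by an independent $\bern(p)$ mask, and then averages many such realizations as a crude ensemble.
\begin{lstlisting}[language = R, style = rstyle]
# A toy ReLU network with layout (2, 3, 1), stored as a list of layers.
set.seed(1)
nu <- list(list(W = matrix(rnorm(6), 3, 2), b = rnorm(3)),
           list(W = matrix(rnorm(3), 1, 3), b = rnorm(1)))

relu <- function(z) pmax(z, 0)

# Realization with dropout: each layer's activation is multiplied
# entrywise by an independent Bern(p) mask, as in the display above.
realize_dropout <- function(nu, x, p) {
  a <- x
  for (layer in nu) {
    z    <- layer$W %*% a + layer$b
    mask <- rbinom(nrow(z), 1, p)   # one Bernoulli(p) draw per unit
    a    <- mask * relu(z)
  }
  as.numeric(a)
}

realize_dropout(nu, c(1, -2), p = 0.8)

# A crude ensemble in the sense described above: average many
# independent dropout realizations of the same network.
mean(replicate(100, realize_dropout(nu, c(1, -2), p = 0.8)))
\end{lstlisting}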
\subsection{Further Approximants}
\section{Further Approximants}
In theory the approximation schemes given in the case of $\xpn_n^{q,\ve}, \csn_n^{q,\ve}$, and $\sne_n^{q,\ve}$ given in the previous sections, could be used to approximate more transcendental functions, and identities such as alluded to in Remark \ref{rem:pyth_idt}. Indeed, recent attempts have been made to approximate backwards and forward Euler methods as in \cite{grohs2019spacetime}. In fact, we may this architecture was originally envisioned to approximate, Multi-Level Picard iterations, as seen in \cite{ackermann2023deep}. These neural network methods have been proven to beat the curse of dimensionality in the sense that the size of these networks (parameter and depth counts) grow only polynomially with respect to the desired accuracy. In practice, it remains to be seen whether for larger dimensions, the increased number of operations and architectures to contend with do not make up for the polynomial increase in parameter and depths, especially when it comes to computaiton time.
In theory, the approximation schemes given for $\xpn_n^{q,\ve}$, $\csn_n^{q,\ve}$, and $\sne_n^{q,\ve}$ in the previous sections could be used to approximate further transcendental functions, and identities such as the one alluded to in Remark \ref{rem:pyth_idt}. Indeed, recent attempts have been made to approximate backward and forward Euler methods, as in \cite{grohs2019spacetime}. In fact, this architecture was originally envisioned to approximate Multi-Level Picard iterations, as seen in \cite{ackermann2023deep}. These neural network methods have been proven to beat the curse of dimensionality in the sense that the size of these networks (parameter and depth counts) grows only polynomially with respect to the desired accuracy. In practice, it remains to be seen whether, for larger dimensions, the increased number of operations and architectures to contend with does not offset the polynomial growth in parameters and depth, especially when it comes to computation time.
In a similar note, these architectures have so far lacked a consistent implementation in a widely available programming language. Part of the dissertation work has been focused on implementing these architectures as an $\texttt{R}$ package, available at \texttt{CRAN}.
In a similar vein, these architectures have so far lacked a consistent implementation in a widely available programming language. Part of the dissertation work has been focused on implementing these architectures as an \texttt{R} package, available on \texttt{CRAN}.
\subsection{Algebraic Properties of this Framework}
\section{Algebraic Properties of this Framework}
It is quite straightforward to see that the instantiation operation has suitably functorial properties, at the very least when instantiating with the identity function. More specifically, consider the category \texttt{Mat} whose objects are natural numbers $m,n$, and whose arrows $m \xleftarrow{A} n$ are matrices $A \in \R^{m\times n}$, i.e. continuous linear maps between the vector spaces $\R^n$ and $\R^m$ respectively. Consider as well the set of neural networks $\nu \in \neu$ with $\inn\lp \nu \rp = n$ and $\out\lp \nu \rp = m$.
\\
In such a case, note that the instantiation operation satisfies the axiom of functoriality, namely that composition is respected under instantiation. Note also that we have alluded to the fact that neural networks under composition, with $\id$ (the appropriate one for our dimension) acting as the unit, behave like a monoid under instantiation.
A further exploration of the algebraic properties of this framework could present a fruitful avenue of future study.
Note, for example, that a neural network analogue of the derivative, one that respects the chain rule under instantiation, already exists in the literature, e.g. \cite{nn_diff}. Thus there is a rather rich and growing set of algebraic operations that have been proposed for neural networks.
A further exploration of the algebraic properties of this artificial neural network framework could present a fruitful avenue of future study.
This completes this Dissertation.


@ -0,0 +1,60 @@
\relax
\providecommand\zref@newlabel[2]{}
\providecommand\hyper@newdestlabel[2]{}
\@setckpt{front_matter}{
\setcounter{page}{5}
\setcounter{equation}{0}
\setcounter{enumi}{0}
\setcounter{enumii}{0}
\setcounter{enumiii}{0}
\setcounter{enumiv}{0}
\setcounter{footnote}{0}
\setcounter{mpfootnote}{0}
\setcounter{part}{0}
\setcounter{chapter}{0}
\setcounter{section}{0}
\setcounter{subsection}{0}
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{0}
\setcounter{table}{0}
\setcounter{@pps}{0}
\setcounter{@ppsavesec}{0}
\setcounter{@ppsaveapp}{0}
\setcounter{AM@survey}{0}
\setcounter{parentequation}{0}
\setcounter{footdir@label}{0}
\setcounter{tmpA}{0}
\setcounter{tmpB}{0}
\setcounter{tmpC}{0}
\setcounter{tmpD}{0}
\setcounter{tmpE}{0}
\setcounter{tmpF}{0}
\setcounter{Hijriday}{0}
\setcounter{Hijrimonth}{0}
\setcounter{Hijriyear}{0}
\setcounter{subfigure}{0}
\setcounter{lofdepth}{1}
\setcounter{subtable}{0}
\setcounter{lotdepth}{1}
\setcounter{section@level}{0}
\setcounter{Item}{0}
\setcounter{Hfootnote}{0}
\setcounter{Hy@AnnotLevel}{0}
\setcounter{bookmark@seq@number}{0}
\setcounter{NAT@ctr}{0}
\setcounter{ALG@line}{0}
\setcounter{ALG@rem}{0}
\setcounter{ALG@nested}{0}
\setcounter{ALG@Lnr}{1}
\setcounter{ALG@blocknr}{1}
\setcounter{ALG@storecount}{0}
\setcounter{ALG@tmpcounter}{0}
\setcounter{@stackindex}{0}
\setcounter{ROWcellindex@}{0}
\setcounter{lstnumber}{1}
\setcounter{theorem}{0}
\setcounter{corollary}{0}
\setcounter{lstlisting}{0}
}


@ -0,0 +1,142 @@
\begin{singlespace}
\begin{center}
Artificial Neural Networks Applied to Stochastic Monte Carlo as a Way to Approximate Modified Heat Equations, and Their Associated Parameters, Depths, and Accuracies.
\end{center}
\vspace{0.5cm}
\begin{center}
A dissertation submitted in partial fulfillment \\
of the requirements for the degree of \\
Doctor of Philosophy in Mathematics
\end{center}
\vspace{1cm}
\begin{center}
by
\end{center}
\vspace{0.5cm}
\begin{center}
Shakil Ahmed Rafi \\
Troy University \\
Bachelor of Science in Mathematics, 2015 \\
University of Arkansas \\
Master of Science in Mathematics, 2019
\end{center}
\vspace{0.5cm}
\begin{center}
May 2024 \\
University of Arkansas
\end{center}
\vspace{0.5cm}
This dissertation is approved for recommendation to the Graduate Council.
\vspace{1.5cm}
\begin{center}
\noindent\hspace*{0cm}\rule{7cm}{0.7pt} \\
Joshua Lee Padgett, Ph.D.\\
Dissertation Director
\end{center}
\vspace{1cm}
\begin{minipage}{0.5\textwidth}
\begin{center}
\noindent\hspace*{0cm}\rule{7cm}{0.7pt} \\
Ukash Nakarmi, Ph.D.\\
Committee Member
\end{center}
\end{minipage}
\begin{raggedleft}
\begin{minipage}{0.5\textwidth}
\begin{center}
\noindent\hspace*{0cm}\rule{7cm}{0.7pt} \\
Jiahui Chen, Ph.D.\\
Committee Member
\end{center}
\end{minipage}
\end{raggedleft}
\vspace{0.5cm}
\begin{center}
\noindent\hspace*{0cm}\rule{7cm}{0.7pt} \\
Tulin Kaman, Ph.D.\\
Committee Member
\end{center}
\vspace{1cm}
\newpage
\begin{center}
\textbf{Abstract}
\end{center}
This dissertation seeks to explore a certain calculus for artificial neural networks. Specifically, we will be looking at versions of the heat equation and exploring strategies for approximating their solutions.
\\~\\
Our strategy towards the beginning will be to take a technique called Multi-Level Picard (MLP), and present a simplified version of it showing that it converges to a solution of the equation $\lp \frac{\partial}{\partial t}u_d\rp\lp t,x\rp = \lp \nabla^2_x u_d\rp\lp t,x\rp$.
\\~\\
We will then take a small detour exploring the viscosity super-solution properties of solutions to such equations. It is here that we will first encounter Feynman-Kac, and see that solutions to these equations can be expressed as the expected value of a certain stochastic integral.
\\~\\
The final part of the dissertation will be dedicated to expanding a certain neural network framework. We will build on this framework by introducing new operations, namely raising to a power, and use this to build out neural network polynomials. This opens the gateway for approximating transcendental functions such as $\exp\lp x\rp,\sin\lp x\rp$, and $\cos\lp x\rp$. This, coupled with a trapezoidal rule mechanism for integration, allows us to approximate expressions of the form $\exp \lp \int_a^b \square \, dt\rp$.
\\~\\
We will, in the last chapter, look at how the technology of neural networks developed in the previous two chapters works towards approximating the expression that Feynman-Kac asserts must be the solution to these modified heat equations. We will then end by giving approximate bounds for the error in the Monte Carlo method. All the while we will maintain that the parameter estimates and depth estimates remain polynomial in $\frac{1}{\ve}$.
\\~\\
As an added bonus we will also look at the simplified MLP technique from the previous chapters of this dissertation and show that yes, it can indeed be approximated with artificial neural networks, and that yes, this can be done with neural networks whose parameter and depth counts grow only polynomially in $\frac{1}{\ve}$.
\\~\\
Our appendix will contain code listings of these neural network operations, some of the architectures, and some small scale simulation results.
\newpage
\begin{center}
\vspace*{\fill}
\copyright 2024 Shakil Ahmed Rafi \\
All rights reserved.
\vspace*{\fill}
\end{center}
\newpage
\begin{center}
\textbf{Acknowledgements}
\end{center}
I would like to acknowledge my advisor, Dr. Joshua Padgett, who has been instrumental in my Ph.D. journey. I am incredibly thankful for his taking the time out of his busy schedule to meet with me over the weekends and helping me finish my dissertation. Without his help, guidance, and patience I would never have been where I am today. You not only taught me mathematics, but also how to be a mathematician. Thank you.
\\~\\
I would also like to thank my department, and everyone there, including, but not limited to, Dr. Andrew Raich, for his incredible patience and helpful guidance throughout the years. I would also like to thank Dr. Ukash Nakarmi for the excellent collaborations I've had. I would also like to thank Egan Meaux for all the little things he does to keep the department going.
\\~\\
I would like to acknowledge Marufa Mumu for believing in me when I didn't. You really made the last few months of writing this dissertation less painful.
\\~\\
I would like to acknowledge my cat, a beautiful Turkish Angora, Tommy. He was pretty useless, but stroking his fur made me stress a little less.
\\~\\
Finally, I would like to thank Valetta Ventures, Inc. and their product Texifier. It is a marvel of software engineering and made the process of creating this dissertation much less painful than it already was.
\newpage
\begin{center}
\vspace*{\fill}
Dedicated to my grandparents, \\
M.A. Hye, M.A., \& Nilufar Hye\\
who would've loved to see this but can't; \\
to my parents, \\
Kamal Uddin Ahmed, M.A., \& Shahnaz Parveen, M.A.,\\
who kept faith in me, always; \\
and finally to my brothers, \\
Wakil Ahmed Shabi, BBA \& Nabeel Ahmed Sami, B.Eng., \\
for whom I have been too imperfect a role model.\\
\vspace*{\fill}
\end{center}
\newpage
\begin{center}
\vspace*{\fill}
\textit{Read, in the name of your Lord}\\
\textemdash Surah Al-Alaq:1\\~\\
\textit{The conquest of nature must be achieved with number and measure.} \\
\textemdash Ren\'e Descartes \\
\vspace*{\fill}
\end{center}
\newpage
\end{singlespace}


@ -1,200 +0,0 @@
\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{}
\@writefile{toc}{\contentsline {part}{I\hspace {1em}On Convergence of Brownian Motion Monte Carlo}{4}{part.1}\protected@file@percent }
\@input{Introduction.aux}
\@input{Brownian_motion_monte_carlo.aux}
\@input{u_visc_sol.aux}
\@input{brownian_motion_monte_carlo_non_linear_case.aux}
\@writefile{toc}{\contentsline {part}{II\hspace {1em}A Structural Description of Artificial Neural Networks}{61}{part.2}\protected@file@percent }
\@input{neural_network_introduction.aux}
\@input{ann_product.aux}
\@input{modified_mlp_associated_nn.aux}
\@input{categorical_neural_network.aux}
\@writefile{toc}{\contentsline {chapter}{\numberline {9}ANN first approximations}{133}{chapter.9}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {9.1}Activation Function as Neural Networks}{133}{section.9.1}\protected@file@percent }
\newlabel{actnn}{{9.1.1}{133}{Activation ANN}{theorem.9.1.1}{}}
\newlabel{actnn@cref}{{[definition][1][9,1]9.1.1}{[1][133][]133}}
\newlabel{7.1.2}{{9.1.2}{133}{}{theorem.9.1.2}{}}
\newlabel{7.1.2@cref}{{[lemma][2][9,1]9.1.2}{[1][133][]133}}
\newlabel{6.0.3}{{9.1.3}{133}{}{theorem.9.1.3}{}}
\newlabel{6.0.3@cref}{{[lemma][3][9,1]9.1.3}{[1][133][]133}}
\citation{grohs2019spacetime}
\@writefile{toc}{\contentsline {section}{\numberline {9.2}ANN Representations for One-Dimensional Identity}{134}{section.9.2}\protected@file@percent }
\newlabel{7.2.1}{{9.2.1}{134}{Identity Neural Network}{theorem.9.2.1}{}}
\newlabel{7.2.1@cref}{{[definition][1][9,2]9.2.1}{[1][134][]134}}
\newlabel{7.2.2}{{9.2.2}{135}{Identity Neural Network}{equation.9.2.2}{}}
\newlabel{7.2.2@cref}{{[equation][2][9,2]9.2.2}{[1][134][]135}}
\newlabel{idprop}{{9.2.2}{135}{}{theorem.9.2.2}{}}
\newlabel{idprop@cref}{{[lemma][2][9,2]9.2.2}{[1][135][]135}}
\newlabel{7.2.3}{{9.2.3}{136}{}{theorem.9.2.3}{}}
\newlabel{7.2.3@cref}{{[lemma][3][9,2]9.2.3}{[1][136][]136}}
\newlabel{7.2.8}{{9.2.8}{137}{ANN Representations for One-Dimensional Identity}{equation.9.2.8}{}}
\newlabel{7.2.8@cref}{{[equation][8][9,2]9.2.8}{[1][137][]137}}
\@writefile{toc}{\contentsline {section}{\numberline {9.3}Modulus of Continuity}{142}{section.9.3}\protected@file@percent }
\newlabel{9.3.1}{{9.3.1}{142}{}{equation.9.3.1}{}}
\newlabel{9.3.1@cref}{{[equation][1][9,3]9.3.1}{[1][142][]142}}
\newlabel{lem:9.3.3}{{9.3.3}{142}{}{theorem.9.3.3}{}}
\newlabel{lem:9.3.3@cref}{{[lemma][3][9,3]9.3.3}{[1][142][]142}}
\@writefile{toc}{\contentsline {section}{\numberline {9.4}Linear Interpolation of real-valued functions}{143}{section.9.4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {9.4.1}The Linear Interpolation Operator}{143}{subsection.9.4.1}\protected@file@percent }
\newlabel{lio}{{9.4.1}{143}{Linear Interpolation Operator}{theorem.9.4.1}{}}
\newlabel{lio@cref}{{[definition][1][9,4]9.4.1}{[1][143][]143}}
\newlabel{7.3.1}{{9.4.1}{143}{}{equation.9.4.1}{}}
\newlabel{7.3.1@cref}{{[equation][1][9,4]9.4.1}{[1][143][]143}}
\newlabel{7.3.2}{{9.4.2}{143}{}{equation.9.4.2}{}}
\newlabel{7.3.2@cref}{{[equation][2][9,4]9.4.2}{[1][143][]143}}
\newlabel{lem:9.4.3}{{9.4.3}{144}{}{theorem.9.4.3}{}}
\newlabel{lem:9.4.3@cref}{{[lemma][3][9,4]9.4.3}{[1][144][]144}}
\@writefile{toc}{\contentsline {subsection}{\numberline {9.4.2}Neural Networks to approximate the $\lin $ operator}{144}{subsection.9.4.2}\protected@file@percent }
\newlabel{7.3.3}{{9.4.4}{144}{}{theorem.9.4.4}{}}
\newlabel{7.3.3@cref}{{[lemma][4][9,4]9.4.4}{[1][144][]144}}
\citation{Grohs_2022}
\newlabel{9.3.4}{{9.4.5}{145}{}{theorem.9.4.5}{}}
\newlabel{9.3.4@cref}{{[lemma][5][9,4]9.4.5}{[1][145][]145}}
\newlabel{7.3.5}{{9.4.7}{145}{}{equation.9.4.7}{}}
\newlabel{7.3.5@cref}{{[equation][7][9,4]9.4.7}{[1][145][]145}}
\newlabel{7.3.6}{{9.4.8}{145}{Neural Networks to approximate the $\lin $ operator}{equation.9.4.8}{}}
\newlabel{7.3.6@cref}{{[equation][8][9,4]9.4.8}{[1][145][]145}}
\citation{Grohs_2022}
\newlabel{7.3.8}{{9.4.10}{146}{Neural Networks to approximate the $\lin $ operator}{equation.9.4.10}{}}
\newlabel{7.3.8@cref}{{[equation][10][9,4]9.4.10}{[1][146][]146}}
\newlabel{7.3.10.2}{{9.4.11}{146}{Neural Networks to approximate the $\lin $ operator}{equation.9.4.11}{}}
\newlabel{7.3.10.2@cref}{{[equation][11][9,4]9.4.11}{[1][146][]146}}
\newlabel{7.3.10}{{9.4.12}{146}{}{equation.9.4.12}{}}
\newlabel{7.3.10@cref}{{[equation][12][9,4]9.4.12}{[1][146][]146}}
\newlabel{7.3.13}{{9.4.15}{147}{Neural Networks to approximate the $\lin $ operator}{equation.9.4.15}{}}
\newlabel{7.3.13@cref}{{[equation][15][9,4]9.4.15}{[1][146][]147}}
\newlabel{7.3.14}{{9.4.16}{147}{}{equation.9.4.16}{}}
\newlabel{7.3.14@cref}{{[equation][16][9,4]9.4.16}{[1][147][]147}}
\@writefile{toc}{\contentsline {section}{\numberline {9.5}Neural network approximation of 1-dimensional functions.}{148}{section.9.5}\protected@file@percent }
\newlabel{lem:9.5.1}{{9.5.1}{148}{}{theorem.9.5.1}{}}
\newlabel{lem:9.5.1@cref}{{[lemma][1][9,5]9.5.1}{[1][148][]148}}
\newlabel{lem:9.5.2}{{9.5.2}{149}{}{theorem.9.5.2}{}}
\newlabel{lem:9.5.2@cref}{{[lemma][2][9,5]9.5.2}{[1][149][]149}}
\newlabel{(9.5.4)}{{9.5.4}{150}{Neural network approximation of 1-dimensional functions}{equation.9.5.4}{}}
\newlabel{(9.5.4)@cref}{{[equation][4][9,5]9.5.4}{[1][149][]150}}
\@writefile{toc}{\contentsline {section}{\numberline {9.6}$\trp ^h$ and neural network approximations for the trapezoidal rule.}{151}{section.9.6}\protected@file@percent }
\newlabel{(9.6.3)}{{9.6.3}{152}{}{equation.9.6.3}{}}
\newlabel{(9.6.3)@cref}{{[equation][3][9,6]9.6.3}{[1][152][]152}}
\newlabel{(9.6.6)}{{9.6.6}{153}{$\trp ^h$ and neural network approximations for the trapezoidal rule}{equation.9.6.6}{}}
\newlabel{(9.6.6)@cref}{{[equation][6][9,6]9.6.6}{[1][153][]153}}
\@writefile{toc}{\contentsline {section}{\numberline {9.7}Linear interpolation for multi-dimensional functions}{154}{section.9.7}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {9.7.1}The $\nrm ^d_1$ and $\mxm ^d$ networks}{154}{subsection.9.7.1}\protected@file@percent }
\newlabel{(9.7.1)}{{9.7.1}{154}{The $\nrm _1^d$ neural network}{equation.9.7.1}{}}
\newlabel{(9.7.1)@cref}{{[equation][1][9,7]9.7.1}{[1][154][]154}}
\newlabel{9.7.2}{{9.7.2}{154}{}{theorem.9.7.2}{}}
\newlabel{9.7.2@cref}{{[lemma][2][9,7]9.7.2}{[1][154][]154}}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\newlabel{9.7.6}{{9.7.10}{157}{Maxima ANN representations}{equation.9.7.10}{}}
\newlabel{9.7.6@cref}{{[equation][10][9,7]9.7.10}{[1][156][]157}}
\newlabel{9.7.4}{{9.7.4}{157}{}{theorem.9.7.4}{}}
\newlabel{9.7.4@cref}{{[lemma][4][9,7]9.7.4}{[1][157][]157}}
\newlabel{9.7.7}{{9.7.11}{157}{The $\nrm ^d_1$ and $\mxm ^d$ networks}{equation.9.7.11}{}}
\newlabel{9.7.7@cref}{{[equation][11][9,7]9.7.11}{[1][157][]157}}
\newlabel{9.7.8}{{9.7.12}{157}{The $\nrm ^d_1$ and $\mxm ^d$ networks}{equation.9.7.12}{}}
\newlabel{9.7.8@cref}{{[equation][12][9,7]9.7.12}{[1][157][]157}}
\citation{grohs2019spacetime}
\newlabel{9.7.10}{{9.7.14}{158}{The $\nrm ^d_1$ and $\mxm ^d$ networks}{equation.9.7.14}{}}
\newlabel{9.7.10@cref}{{[equation][14][9,7]9.7.14}{[1][158][]158}}
\newlabel{9.7.11}{{9.7.15}{158}{The $\nrm ^d_1$ and $\mxm ^d$ networks}{equation.9.7.15}{}}
\newlabel{9.7.11@cref}{{[equation][15][9,7]9.7.15}{[1][158][]158}}
\newlabel{9.7.12}{{9.7.16}{158}{The $\nrm ^d_1$ and $\mxm ^d$ networks}{equation.9.7.16}{}}
\newlabel{9.7.12@cref}{{[equation][16][9,7]9.7.16}{[1][158][]158}}
\newlabel{9.7.13}{{9.7.17}{158}{The $\nrm ^d_1$ and $\mxm ^d$ networks}{equation.9.7.17}{}}
\newlabel{9.7.13@cref}{{[equation][17][9,7]9.7.17}{[1][158][]158}}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\newlabel{9.7.15}{{9.7.19}{159}{The $\nrm ^d_1$ and $\mxm ^d$ networks}{equation.9.7.19}{}}
\newlabel{9.7.15@cref}{{[equation][19][9,7]9.7.19}{[1][158][]159}}
\newlabel{9.7.16}{{9.7.20}{159}{The $\nrm ^d_1$ and $\mxm ^d$ networks}{equation.9.7.20}{}}
\newlabel{9.7.16@cref}{{[equation][20][9,7]9.7.20}{[1][159][]159}}
\citation{grohs2019spacetime}
\@writefile{toc}{\contentsline {subsection}{\numberline {9.7.2}The $\mxm ^d$ neural network and maximum convolutions }{160}{subsection.9.7.2}\protected@file@percent }
\newlabel{(9.7.5)}{{9.7.5}{160}{}{theorem.9.7.5}{}}
\newlabel{(9.7.5)@cref}{{[lemma][5][9,7]9.7.5}{[1][160][]160}}
\newlabel{9.7.20}{{9.7.26}{160}{}{equation.9.7.26}{}}
\newlabel{9.7.20@cref}{{[equation][26][9,7]9.7.26}{[1][160][]160}}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\citation{grohs2019spacetime}
\@writefile{toc}{\contentsline {subsection}{\numberline {9.7.3}Lipschitz function approximations}{164}{subsection.9.7.3}\protected@file@percent }
\newlabel{(9.7.6)}{{9.7.3}{164}{Lipschitz function approximations}{subsection.9.7.3}{}}
\newlabel{(9.7.6)@cref}{{[subsection][3][9,7]9.7.3}{[1][164][]164}}
\newlabel{9.7.30}{{9.7.41}{164}{}{equation.9.7.41}{}}
\newlabel{9.7.30@cref}{{[equation][41][9,7]9.7.41}{[1][164][]164}}
\newlabel{9.7.31}{{9.7.42}{165}{}{equation.9.7.42}{}}
\newlabel{9.7.31@cref}{{[equation][42][9,7]9.7.42}{[1][165][]165}}
\newlabel{9.7.32}{{9.7.43}{165}{Lipschitz function approximations}{equation.9.7.43}{}}
\newlabel{9.7.32@cref}{{[equation][43][9,7]9.7.43}{[1][165][]165}}
\newlabel{9.7.33}{{9.7.44}{165}{Lipschitz function approximations}{equation.9.7.44}{}}
\newlabel{9.7.33@cref}{{[equation][44][9,7]9.7.44}{[1][165][]165}}
\newlabel{9.7.6.1}{{9.7.6.1}{166}{}{corollary.9.7.6.1}{}}
\newlabel{9.7.6.1@cref}{{[corollary][1][]9.7.6.1}{[1][166][]166}}
\@writefile{toc}{\contentsline {subsection}{\numberline {9.7.4}Explicit ANN approximations }{167}{subsection.9.7.4}\protected@file@percent }
\newlabel{(9.7.42)}{{9.7.53}{167}{}{equation.9.7.53}{}}
\newlabel{(9.7.42)@cref}{{[equation][53][9,7]9.7.53}{[1][167][]167}}
\newlabel{9.7.43}{{9.7.54}{167}{Explicit ANN approximations}{equation.9.7.54}{}}
\newlabel{9.7.43@cref}{{[equation][54][9,7]9.7.54}{[1][167][]167}}
\newlabel{(9.7.44)}{{9.7.55}{167}{Explicit ANN approximations}{equation.9.7.55}{}}
\newlabel{(9.7.44)@cref}{{[equation][55][9,7]9.7.55}{[1][167][]167}}
\@writefile{toc}{\contentsline {part}{III\hspace {1em}A deep-learning solution for $u$ and Brownian motions}{169}{part.3}\protected@file@percent }
\@input{ann_rep_brownian_motion_monte_carlo.aux}
\citation{*}
\bibdata{Dissertation.bib}
\bibcite{bass_2011}{{1}{2011}{{Bass}}{{}}}
\bibcite{Beck_2021}{{2}{2021a}{{Beck et~al.}}{{}}}
\bibcite{BHJ21}{{3}{2021b}{{Beck et~al.}}{{}}}
\bibcite{bhj20}{{4}{2021c}{{Beck et~al.}}{{}}}
\bibcite{crandall_lions}{{5}{1992}{{Crandall et~al.}}{{}}}
\bibcite{da_prato_zabczyk_2002}{{6}{2002}{{Da~Prato and Zabczyk}}{{}}}
\bibcite{durrett2019probability}{{7}{2019}{{Durrett}}{{}}}
\bibcite{golub2013matrix}{{8}{2013}{{Golub and Van~Loan}}{{}}}
\bibcite{grohsetal}{{9}{2018}{{Grohs et~al.}}{{}}}
\bibcite{grohs2019spacetime}{{10}{2023}{{Grohs et~al.}}{{}}}
\bibcite{Grohs_2022}{{11}{2022}{{Grohs et~al.}}{{}}}
\bibcite{Gyngy1996ExistenceOS}{{12}{1996}{{Gy{\"o}ngy and Krylov}}{{}}}
\bibcite{hutzenthaler_overcoming_2020}{{13}{2020a}{{Hutzenthaler et~al.}}{{}}}
\bibcite{hutzenthaler_strong_2021}{{14}{2021}{{Hutzenthaler et~al.}}{{}}}
\bibcite{hjw2020}{{15}{2020b}{{Hutzenthaler et~al.}}{{}}}
\bibcite{Ito1942a}{{16}{1942a}{{It\^o}}{{}}}
\bibcite{Ito1946}{{17}{1942b}{{It\^o}}{{}}}
\bibcite{karatzas1991brownian}{{18}{1991}{{Karatzas and Shreve}}{{}}}
\bibcite{rio_moment_2009}{{19}{2009}{{Rio}}{{}}}
\bibstyle{apalike}
\@writefile{toc}{\contentsline {chapter}{Appendices}{180}{section*.3}\protected@file@percent }
\newlabel{pythoncode}{{1}{181}{Python Code}{lstlisting.a..1}{}}
\newlabel{pythoncode@cref}{{[lstlisting][1][0]1}{[1][181][]181}}
\@writefile{lol}{\contentsline {lstlisting}{\numberline {1}Python Code}{181}{lstlisting.a..1}\protected@file@percent }
\newlabel{pythoncode}{{2}{183}{Python Code}{lstlisting.a..2}{}}
\newlabel{pythoncode@cref}{{[lstlisting][2][0]2}{[1][183][]183}}
\@writefile{lol}{\contentsline {lstlisting}{\numberline {2}Python Code}{183}{lstlisting.a..2}\protected@file@percent }
\gdef \@abspage@last{185}


@ -1,120 +0,0 @@
\begin{thebibliography}{}
\bibitem[Bass, 2011]{bass_2011}
Bass, R.~F. (2011).
\newblock {\em Brownian Motion}, page 612.
\newblock Cambridge Series in Statistical and Probabilistic Mathematics.
Cambridge University Press.
\bibitem[Beck et~al., 2021a]{Beck_2021}
Beck, C., Gonon, L., Hutzenthaler, M., and Jentzen, A. (2021a).
\newblock On existence and uniqueness properties for solutions of stochastic
fixed point equations.
\newblock {\em Discrete \& Continuous Dynamical Systems - B}, 26(9):4927.
\bibitem[Beck et~al., 2021b]{BHJ21}
Beck, C., Hutzenthaler, M., and Jentzen, A. (2021b).
\newblock On nonlinear {Feynman}{\textendash}{Kac} formulas for viscosity
solutions of semilinear parabolic partial differential equations.
\newblock {\em Stochastics and Dynamics}, 21(08).
\bibitem[Beck et~al., 2021c]{bhj20}
Beck, C., Hutzenthaler, M., and Jentzen, A. (2021c).
\newblock On nonlinear {Feynman}{\textendash}{Kac} formulas for viscosity solutions of
semilinear parabolic partial differential equations.
\newblock {\em Stochastics and Dynamics}, 21(08):2150048.
\bibitem[Crandall et~al., 1992]{crandall_lions}
Crandall, M.~G., Ishii, H., and Lions, P.-L. (1992).
\newblock User's guide to viscosity solutions of second order partial
differential equations.
\newblock {\em Bull. Amer. Math. Soc.}, 27(1):1--67.
\bibitem[Da~Prato and Zabczyk, 2002]{da_prato_zabczyk_2002}
Da~Prato, G. and Zabczyk, J. (2002).
\newblock {\em Second Order Partial Differential Equations in Hilbert Spaces}.
\newblock London Mathematical Society Lecture Note Series. Cambridge University
Press.
\bibitem[Durrett, 2019]{durrett2019probability}
Durrett, R. (2019).
\newblock {\em Probability: Theory and Examples}.
\newblock Cambridge Series in Statistical and Probabilistic Mathematics.
Cambridge University Press.
\bibitem[Golub and Van~Loan, 2013]{golub2013matrix}
Golub, G. and Van~Loan, C. (2013).
\newblock {\em Matrix Computations}.
\newblock Johns Hopkins Studies in the Mathematical Sciences. Johns Hopkins
University Press.
\bibitem[Grohs et~al., 2018]{grohsetal}
Grohs, P., Hornung, F., Jentzen, A., and von Wurstemberger, P. (2018).
\newblock {A proof that artificial neural networks overcome the curse of
dimensionality in the numerical approximation of Black-Scholes partial
differential equations}.
\newblock Papers 1809.02362, arXiv.org.
\bibitem[Grohs et~al., 2023]{grohs2019spacetime}
Grohs, P., Hornung, F., Jentzen, A., and Zimmermann, P. (2023).
\newblock Space-time error estimates for deep neural network approximations for
differential equations.
\newblock {\em Advances in Computational Mathematics}, 49(1):4.
\bibitem[Grohs et~al., 2022]{Grohs_2022}
Grohs, P., Jentzen, A., and Salimova, D. (2022).
\newblock Deep neural network approximations for solutions of {PDEs} based on
monte carlo algorithms.
\newblock {\em Partial Differential Equations and Applications}, 3(4).
\bibitem[Gy{\"o}ngy and Krylov, 1996]{Gyngy1996ExistenceOS}
Gy{\"o}ngy, I. and Krylov, N.~V. (1996).
\newblock Existence of strong solutions for {It\^o}'s stochastic equations via
approximations.
\newblock {\em Probability Theory and Related Fields}, 105:143--158.
\bibitem[Hutzenthaler et~al., 2020a]{hutzenthaler_overcoming_2020}
Hutzenthaler, M., Jentzen, A., Kruse, T., Anh~Nguyen, T., and von
Wurstemberger, P. (2020a).
\newblock Overcoming the curse of dimensionality in the numerical approximation
of semilinear parabolic partial differential equations.
\newblock {\em Proceedings of the Royal Society A: Mathematical, Physical and
Engineering Sciences}, 476(2244):20190630.
\bibitem[Hutzenthaler et~al., 2021]{hutzenthaler_strong_2021}
Hutzenthaler, M., Jentzen, A., Kuckuck, B., and Padgett, J.~L. (2021).
\newblock Strong {$L^p$}-error analysis of nonlinear {Monte} {Carlo}
approximations for high-dimensional semilinear partial differential
equations.
\newblock arXiv preprint arXiv:2110.08297.
\bibitem[Hutzenthaler et~al., 2020b]{hjw2020}
Hutzenthaler, M., Jentzen, A., and von Wurstemberger, P. (2020b).
\newblock {Overcoming the curse of dimensionality in the approximative pricing
of financial derivatives with default risks}.
\newblock {\em Electronic Journal of Probability}, 25:1--73.
\bibitem[It\^o, 1942a]{Ito1942a}
It\^o, K. (1942a).
\newblock Differential equations determining {Markov} processes (original in
{Japanese}).
\newblock {\em Zenkoku Shijo Sugaku Danwakai}, 244(1077):1352--1400.
\bibitem[It\^o, 1942b]{Ito1946}
It\^o, K. (1942b).
\newblock On a stochastic integral equation.
\newblock {\em Proc. Imperial Acad. Tokyo}, 22:32--35.
\bibitem[Karatzas and Shreve, 1991]{karatzas1991brownian}
Karatzas, I. and Shreve, S. (1991).
\newblock {\em Brownian Motion and Stochastic Calculus}.
\newblock Graduate Texts in Mathematics, volume 113. Springer New York.
\bibitem[Rio, 2009]{rio_moment_2009}
Rio, E. (2009).
\newblock Moment {Inequalities} for {Sums} of {Dependent} {Random} {Variables}
under {Projective} {Conditions}.
\newblock {\em Journal of Theoretical Probability}, 22(1):146--163.
\end{thebibliography}

View File

@ -294,13 +294,12 @@ Publisher: Nature Publishing Group},
file = {Full Text PDF:/Users/shakilrafi/Zotero/storage/EKLDKE65/Tsaban et al. - 2022 - Harnessing protein folding neural networks for pep.pdf:application/pdf},
}
@article{davies_signature_2021,
@article{davies_signature_2022,
title = {The signature and cusp geometry of hyperbolic knots},
author={Alex Davies and Andr'as Juh'asz and Marc Lackenby and Nenad Tomasev},
journal={ArXiv},
year={2021},
volume={abs/2111.15323},
url={https://api.semanticscholar.org/CorpusID:244729717}
journal = {Geometry and Topology},
author = {Davies, A and Juhasz, A and Lackenby, M and Tomasev, N},
year = {2022},
note = {Publisher: Mathematical Sciences Publishers},
}
@article{zhao_space-based_2023,
@ -581,6 +580,19 @@ archivePrefix = {arXiv},
url = {https://dplyr.tidyverse.org},
}
@INPROCEEDINGS{nn_diff,
author={Berner, Julius and Elbrächter, Dennis and Grohs, Philipp and Jentzen, Arnulf},
booktitle={2019 13th International conference on Sampling Theory and Applications (SampTA)},
title={Towards a regularity theory for ReLU networks chain rule and global error estimates},
year={2019},
volume={},
number={},
pages={1-5},
keywords={Neural networks;Standards;Approximation methods;Machine learning;Partial differential equations;Level set},
doi={10.1109/SampTA45681.2019.9031005}}
@Book{ggplot2,
author = {Hadley Wickham},
title = {ggplot2: Elegant Graphics for Data Analysis},
@ -685,6 +697,125 @@ year = {2021}
}
@article{schwab_deep_2019,
title = {Deep learning in high dimension: {Neural} network expression rates for generalized polynomial chaos expansions in {UQ}},
volume = {17},
issn = {0219-5305},
shorttitle = {Deep learning in high dimension},
url = {https://www.worldscientific.com/doi/abs/10.1142/S0219530518500203},
doi = {10.1142/S0219530518500203},
abstract = {We estimate the expressive power of certain deep neural networks (DNNs for short)
on a class of countably-parametric, holomorphic maps $u: U \rightarrow \mathbb{R}$ on the
parameter domain $U = [-1,1]^{\mathbb{N}}$. Dimension-independent rates of best $n$-term
truncations of generalized polynomial chaos (gpc for short) approximations depend only on the
summability exponent of the sequence of their gpc expansion coefficients. So-called
$(b,\varepsilon)$-holomorphic maps $u$, with $b \in \ell^p$ for some $p \in (0,1)$, are known
to allow gpc expansions with coefficient sequences in $\ell^p$. Such maps arise for example as
response surfaces of parametric PDEs, with applications in PDE uncertainty quantification (UQ)
for many mathematical models in engineering and the sciences. Up to logarithmic terms, we
establish the dimension-independent approximation rate $s = 1/p - 1$ for these functions in
terms of the total number $N$ of units and weights in the DNN. It follows that certain DNN
architectures can overcome the curse of dimensionality when expressing possibly
countably-parametric, real-valued maps with a certain degree of sparsity in the sequences of
their gpc expansion coefficients. We also obtain rates of expressive power of DNNs for
countably-parametric maps $u: U \rightarrow V$, where $V$ is the Hilbert space $H^1_0([0,1])$.},
number = {01},
urldate = {2024-03-07},
journal = {Anal. Appl.},
author = {Schwab, Christoph and Zech, Jakob},
month = jan,
year = {2019},
note = {Publisher: World Scientific Publishing Co.},
keywords = {deep networks, Generalized polynomial chaos, sparse grids, uncertainty quantification},
pages = {19--55},
}
@book{Goodfellow-et-al-2016,
title={Deep Learning},
author={Ian Goodfellow and Yoshua Bengio and Aaron Courville},
publisher={MIT Press},
note={\url{http://www.deeplearningbook.org}},
year={2016}
}

View File

@ -1,55 +1,48 @@
This is BibTeX, Version 0.99d (TeX Live 2023)
Capacity: max_strings=200000, hash_size=200000, hash_prime=170003
The top-level auxiliary file: main.aux
A level-1 auxiliary file: Introduction.aux
A level-1 auxiliary file: Brownian_motion_monte_carlo.aux
A level-1 auxiliary file: u_visc_sol.aux
A level-1 auxiliary file: brownian_motion_monte_carlo_non_linear_case.aux
A level-1 auxiliary file: neural_network_introduction.aux
A level-1 auxiliary file: ann_product.aux
A level-1 auxiliary file: modified_mlp_associated_nn.aux
A level-1 auxiliary file: categorical_neural_network.aux
A level-1 auxiliary file: ann_rep_brownian_motion_monte_carlo.aux
The style file: apalike.bst
Database file #1: Dissertation.bib
You've used 19 entries,
1935 wiz_defined-function locations,
608 strings with 7874 characters,
and the built_in function-call counts, 7185 in all, are:
= -- 699
> -- 297
< -- 12
+ -- 102
- -- 98
* -- 611
:= -- 1285
add.period$ -- 63
call.type$ -- 19
change.case$ -- 139
chr.to.int$ -- 15
cite$ -- 19
duplicate$ -- 274
empty$ -- 495
format.name$ -- 125
if$ -- 1401
int.to.chr$ -- 5
The top-level auxiliary file: /Users/shakilrafi/Library/Mobile Documents/com~apple~CloudDocs/Dissertation/main.aux
I found no \citation commands---while reading file /Users/shakilrafi/Library/Mobile Documents/com~apple~CloudDocs/Dissertation/main.aux
I found no \bibdata command---while reading file /Users/shakilrafi/Library/Mobile Documents/com~apple~CloudDocs/Dissertation/main.aux
I found no \bibstyle command---while reading file /Users/shakilrafi/Library/Mobile Documents/com~apple~CloudDocs/Dissertation/main.aux
You've used 0 entries,
0 wiz_defined-function locations,
83 strings with 634 characters,
and the built_in function-call counts, 0 in all, are:
= -- 0
> -- 0
< -- 0
+ -- 0
- -- 0
* -- 0
:= -- 0
add.period$ -- 0
call.type$ -- 0
change.case$ -- 0
chr.to.int$ -- 0
cite$ -- 0
duplicate$ -- 0
empty$ -- 0
format.name$ -- 0
if$ -- 0
int.to.chr$ -- 0
int.to.str$ -- 0
missing$ -- 22
newline$ -- 99
num.names$ -- 57
pop$ -- 116
preamble$ -- 1
purify$ -- 144
missing$ -- 0
newline$ -- 0
num.names$ -- 0
pop$ -- 0
preamble$ -- 0
purify$ -- 0
quote$ -- 0
skip$ -- 206
skip$ -- 0
stack$ -- 0
substring$ -- 438
swap$ -- 32
text.length$ -- 3
substring$ -- 0
swap$ -- 0
text.length$ -- 0
text.prefix$ -- 0
top$ -- 0
type$ -- 104
type$ -- 0
warning$ -- 0
while$ -- 52
while$ -- 0
width$ -- 0
write$ -- 252
write$ -- 0
(There were 3 error messages)

File diff suppressed because it is too large

View File

@ -1,58 +0,0 @@
\BOOKMARK [-1][-]{part.1}{\376\377\000I\000\040\000O\000n\000\040\000C\000o\000n\000v\000e\000r\000g\000e\000n\000c\000e\000\040\000o\000f\000\040\000B\000r\000o\000w\000n\000i\000a\000n\000\040\000M\000o\000t\000i\000o\000n\000\040\000M\000o\000n\000t\000e\000\040\000C\000a\000r\000l\000o}{}% 1
\BOOKMARK [0][-]{chapter.1}{\376\377\000I\000n\000t\000r\000o\000d\000u\000c\000t\000i\000o\000n\000.}{part.1}% 2
\BOOKMARK [1][-]{section.1.1}{\376\377\000N\000o\000t\000a\000t\000i\000o\000n\000,\000\040\000D\000e\000f\000i\000n\000i\000t\000i\000o\000n\000s\000\040\000\046\000\040\000B\000a\000s\000i\000c\000\040\000n\000o\000t\000i\000o\000n\000s\000.}{chapter.1}% 3
\BOOKMARK [2][-]{subsection.1.1.1}{\376\377\000N\000o\000r\000m\000s\000\040\000a\000n\000d\000\040\000I\000n\000n\000e\000r\000\040\000P\000r\000o\000d\000u\000c\000t}{section.1.1}% 4
\BOOKMARK [2][-]{subsection.1.1.2}{\376\377\000P\000r\000o\000b\000a\000b\000i\000l\000i\000t\000y\000\040\000S\000p\000a\000c\000e\000\040\000a\000n\000d\000\040\000B\000r\000o\000w\000n\000i\000a\000n\000\040\000M\000o\000t\000i\000o\000n}{section.1.1}% 5
\BOOKMARK [2][-]{subsection.1.1.3}{\376\377\000L\000i\000p\000s\000c\000h\000i\000t\000z\000\040\000a\000n\000d\000\040\000R\000e\000l\000a\000t\000e\000d\000\040\000N\000o\000t\000i\000o\000n\000s}{section.1.1}% 6
\BOOKMARK [2][-]{subsection.1.1.4}{\376\377\000K\000o\000l\000m\000o\000g\000o\000r\000o\000v\000\040\000E\000q\000u\000a\000t\000i\000o\000n\000s}{section.1.1}% 7
\BOOKMARK [2][-]{subsection.1.1.5}{\376\377\000L\000i\000n\000e\000a\000r\000\040\000A\000l\000g\000e\000b\000r\000a\000\040\000N\000o\000t\000a\000t\000i\000o\000n\000\040\000a\000n\000d\000\040\000D\000e\000f\000i\000n\000i\000t\000i\000o\000n\000s}{section.1.1}% 8
\BOOKMARK [2][-]{subsection.1.1.6}{\376\377\000O\000-\000t\000y\000p\000e\000\040\000n\000o\000t\000a\000t\000i\000o\000n\000\040\000a\000n\000d\000\040\000f\000u\000n\000c\000t\000i\000o\000n\000\040\000g\000r\000o\000w\000t\000h}{section.1.1}% 9
\BOOKMARK [2][-]{subsection.1.1.7}{\376\377\000T\000h\000e\000\040\000I\000v\000e\000r\000s\000o\000n\000\040\000B\000r\000a\000c\000k\000e\000t}{section.1.1}% 10
\BOOKMARK [0][-]{chapter.2}{\376\377\000B\000r\000o\000w\000n\000i\000a\000n\000\040\000M\000o\000t\000i\000o\000n\000\040\000M\000o\000n\000t\000e\000\040\000C\000a\000r\000l\000o}{part.1}% 11
\BOOKMARK [1][-]{section.2.1}{\376\377\000B\000r\000o\000w\000n\000i\000a\000n\000\040\000M\000o\000t\000i\000o\000n\000\040\000P\000r\000e\000l\000i\000m\000i\000n\000a\000r\000i\000e\000s}{chapter.2}% 12
\BOOKMARK [1][-]{section.2.2}{\376\377\000M\000o\000n\000t\000e\000\040\000C\000a\000r\000l\000o\000\040\000A\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000s}{chapter.2}% 13
\BOOKMARK [1][-]{section.2.3}{\376\377\000B\000o\000u\000n\000d\000s\000\040\000a\000n\000d\000\040\000C\000o\000v\000n\000v\000e\000r\000g\000e\000n\000c\000e}{chapter.2}% 14
\BOOKMARK [0][-]{chapter.3}{\376\377\000T\000h\000a\000t\000\040\000u\000\040\000i\000s\000\040\000a\000\040\000v\000i\000s\000c\000o\000s\000i\000t\000y\000\040\000s\000o\000l\000u\000t\000i\000o\000n}{part.1}% 15
\BOOKMARK [1][-]{section.3.1}{\376\377\000S\000o\000m\000e\000\040\000P\000r\000e\000l\000i\000m\000i\000n\000a\000r\000i\000e\000s}{chapter.3}% 16
\BOOKMARK [1][-]{section.3.2}{\376\377\000V\000i\000s\000c\000o\000s\000i\000t\000y\000\040\000S\000o\000l\000u\000t\000i\000o\000n\000s}{chapter.3}% 17
\BOOKMARK [1][-]{section.3.3}{\376\377\000S\000o\000l\000u\000t\000i\000o\000n\000s\000,\000\040\000c\000h\000a\000r\000a\000c\000t\000e\000r\000i\000z\000a\000t\000i\000o\000n\000,\000\040\000a\000n\000d\000\040\000c\000o\000m\000p\000u\000t\000a\000t\000i\000o\000n\000a\000l\000\040\000b\000o\000u\000n\000d\000s\000\040\000t\000o\000\040\000t\000h\000e\000\040\000K\000o\000l\000m\000o\000g\000o\000r\000o\000v\000\040\000b\000a\000c\000k\000w\000a\000r\000d\000\040\000e\000q\000u\000a\000t\000i\000o\000n\000s}{chapter.3}% 18
\BOOKMARK [0][-]{chapter.4}{\376\377\000B\000r\000o\000w\000n\000i\000a\000n\000\040\000m\000o\000t\000i\000o\000n\000\040\000M\000o\000n\000t\000e\000\040\000C\000a\000r\000l\000o\000\040\000o\000f\000\040\000t\000h\000e\000\040\000n\000o\000n\000-\000l\000i\000n\000e\000a\000r\000\040\000c\000a\000s\000e}{part.1}% 19
\BOOKMARK [-1][-]{part.2}{\376\377\000I\000I\000\040\000A\000\040\000S\000t\000r\000u\000c\000t\000u\000r\000a\000l\000\040\000D\000e\000s\000c\000r\000i\000p\000t\000i\000o\000n\000\040\000o\000f\000\040\000A\000r\000t\000i\000f\000i\000c\000i\000a\000l\000\040\000N\000e\000u\000r\000a\000l\000\040\000N\000e\000t\000w\000o\000r\000k\000s}{}% 20
\BOOKMARK [0][-]{chapter.5}{\376\377\000I\000n\000t\000r\000o\000d\000u\000c\000t\000i\000o\000n\000\040\000a\000n\000d\000\040\000B\000a\000s\000i\000c\000\040\000N\000o\000t\000i\000o\000n\000s\000\040\000a\000b\000o\000u\000t\000\040\000N\000e\000u\000r\000a\000l\000\040\000N\000e\000t\000w\000o\000r\000k\000s}{part.2}% 21
\BOOKMARK [1][-]{section.5.1}{\376\377\000T\000h\000e\000\040\000B\000a\000s\000i\000c\000\040\000D\000e\000f\000i\000n\000i\000t\000i\000o\000n\000\040\000o\000f\000\040\000A\000N\000N\000s}{chapter.5}% 22
\BOOKMARK [1][-]{section.5.2}{\376\377\000C\000o\000m\000p\000o\000s\000i\000t\000i\000o\000n\000\040\000a\000n\000d\000\040\000e\000x\000t\000e\000n\000s\000i\000o\000n\000s\000\040\000o\000f\000\040\000A\000N\000N\000s}{chapter.5}% 23
\BOOKMARK [2][-]{subsection.5.2.1}{\376\377\000C\000o\000m\000p\000o\000s\000i\000t\000i\000o\000n}{section.5.2}% 24
\BOOKMARK [2][-]{subsection.5.2.2}{\376\377\000E\000x\000t\000e\000n\000s\000i\000o\000n\000s}{section.5.2}% 25
\BOOKMARK [1][-]{section.5.3}{\376\377\000P\000a\000r\000a\000l\000l\000e\000l\000i\000z\000a\000t\000i\000o\000n\000\040\000o\000f\000\040\000A\000N\000N\000s}{chapter.5}% 26
\BOOKMARK [1][-]{section.5.4}{\376\377\000A\000f\000f\000i\000n\000e\000\040\000L\000i\000n\000e\000a\000r\000\040\000T\000r\000a\000n\000s\000f\000o\000r\000m\000a\000t\000i\000o\000n\000s\000\040\000a\000s\000\040\000A\000N\000N\000s}{chapter.5}% 27
\BOOKMARK [1][-]{section.5.5}{\376\377\000S\000u\000m\000s\000\040\000o\000f\000\040\000A\000N\000N\000s}{chapter.5}% 28
\BOOKMARK [2][-]{subsection.5.5.1}{\376\377\000N\000e\000u\000r\000a\000l\000\040\000N\000e\000t\000w\000o\000r\000k\000\040\000S\000u\000m\000\040\000P\000r\000o\000p\000e\000r\000t\000i\000e\000s}{section.5.5}% 29
\BOOKMARK [1][-]{section.5.6}{\376\377\000L\000i\000n\000e\000a\000r\000\040\000C\000o\000m\000b\000i\000n\000a\000t\000i\000o\000n\000s\000\040\000o\000f\000\040\000A\000N\000N\000s}{chapter.5}% 30
\BOOKMARK [1][-]{section.5.7}{\376\377\000N\000e\000u\000r\000a\000l\000\040\000N\000e\000t\000w\000o\000r\000k\000\040\000D\000i\000a\000g\000r\000a\000m\000s}{chapter.5}% 31
\BOOKMARK [0][-]{chapter.6}{\376\377\000A\000N\000N\000\040\000P\000r\000o\000d\000u\000c\000t\000\040\000A\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000s}{part.2}% 32
\BOOKMARK [1][-]{section.6.1}{\376\377\000A\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000\040\000f\000o\000r\000\040\000s\000i\000m\000p\000l\000e\000\040\000p\000r\000o\000d\000u\000c\000t\000s}{chapter.6}% 33
\BOOKMARK [2][-]{subsection.6.1.1}{\376\377\000T\000h\000e\000\040\000`\0003\003\001\0009\000`\0004\0002\000`\000"\003\007\0006\0001\0003\000A\000`\000`\0004\0005\000`\0004\0007\000`\000"\0006\0000\0003\000A\000P\000r\000d\000\040\000n\000e\000t\000w\000o\000r\000k}{section.6.1}% 34
\BOOKMARK [1][-]{section.6.2}{\376\377\000H\000i\000g\000h\000e\000r\000\040\000A\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000s}{chapter.6}% 35
\BOOKMARK [2][-]{subsection.6.2.1}{\376\377\000T\000h\000e\000\040\000`\0003\003\001\0009\000`\0004\0002\000`\000"\003\007\0006\0001\0003\000A\000`\000`\0004\0005\000`\0004\0007\000`\000"\0006\0000\0003\000A\000T\000u\000n\000\040\000N\000e\000u\000r\000a\000l\000\040\000N\000e\000t\000w\000o\000r\000k}{section.6.2}% 36
\BOOKMARK [2][-]{subsection.6.2.2}{\376\377\000T\000h\000e\000\040\000`\0003\003\001\0009\000`\0004\0002\000`\000"\003\007\0006\0001\0003\000A\000`\000`\0004\0005\000`\0004\0007\000`\000"\0006\0000\0003\000A\000P\000w\000r\000\040\000N\000e\000u\000r\000a\000l\000\040\000N\000e\000t\000w\000o\000r\000k\000s}{section.6.2}% 37
\BOOKMARK [2][-]{subsection.6.2.3}{\376\377\000T\000h\000e\000\040\000`\0003\003\001\0009\000`\0004\0002\000`\000"\003\007\0006\0001\0003\000A\000`\000`\0004\0005\000`\0004\0007\000`\000"\0006\0000\0003\000A\000T\000a\000y\000\040\000n\000e\000u\000r\000a\000l\000\040\000n\000e\000t\000w\000o\000r\000k}{section.6.2}% 38
\BOOKMARK [2][-]{subsection.6.2.4}{\376\377\000N\000e\000u\000r\000a\000l\000\040\000n\000e\000t\000w\000o\000r\000k\000\040\000a\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000s\000\040\000f\000o\000r\000\040\000e\000x\000.}{section.6.2}% 39
\BOOKMARK [0][-]{chapter.7}{\376\377\000A\000\040\000m\000o\000d\000i\000f\000i\000e\000d\000\040\000M\000u\000l\000t\000i\000-\000L\000e\000v\000e\000l\000\040\000P\000i\000c\000a\000r\000d\000\040\000a\000n\000d\000\040\000a\000s\000s\000o\000c\000i\000a\000t\000e\000d\000\040\000n\000e\000u\000r\000a\000l\000\040\000n\000e\000t\000w\000o\000r\000k}{part.2}% 40
\BOOKMARK [0][-]{chapter.8}{\376\377\000S\000o\000m\000e\000\040\000c\000a\000t\000e\000g\000o\000r\000i\000c\000a\000l\000\040\000i\000d\000e\000a\000s\000\040\000a\000b\000o\000u\000t\000\040\000n\000e\000u\000r\000a\000l\000\040\000n\000e\000t\000w\000o\000r\000k\000s}{part.2}% 41
\BOOKMARK [0][-]{chapter.9}{\376\377\000A\000N\000N\000\040\000f\000i\000r\000s\000t\000\040\000a\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000s}{part.2}% 42
\BOOKMARK [1][-]{section.9.1}{\376\377\000A\000c\000t\000i\000v\000a\000t\000i\000o\000n\000\040\000F\000u\000n\000c\000t\000i\000o\000n\000\040\000a\000s\000\040\000N\000e\000u\000r\000a\000l\000\040\000N\000e\000t\000w\000o\000r\000k\000s}{chapter.9}% 43
\BOOKMARK [1][-]{section.9.2}{\376\377\000A\000N\000N\000\040\000R\000e\000p\000r\000e\000s\000e\000n\000t\000a\000t\000i\000o\000n\000s\000\040\000f\000o\000r\000\040\000O\000n\000e\000-\000D\000i\000m\000e\000n\000s\000i\000o\000n\000a\000l\000\040\000I\000d\000e\000n\000t\000i\000t\000y}{chapter.9}% 44
\BOOKMARK [1][-]{section.9.3}{\376\377\000M\000o\000d\000u\000l\000u\000s\000\040\000o\000f\000\040\000C\000o\000n\000t\000i\000n\000u\000i\000t\000y}{chapter.9}% 45
\BOOKMARK [1][-]{section.9.4}{\376\377\000L\000i\000n\000e\000a\000r\000\040\000I\000n\000t\000e\000r\000p\000o\000l\000a\000t\000i\000o\000n\000\040\000o\000f\000\040\000r\000e\000a\000l\000-\000v\000a\000l\000u\000e\000d\000\040\000f\000u\000n\000c\000t\000i\000o\000n\000s}{chapter.9}% 46
\BOOKMARK [2][-]{subsection.9.4.1}{\376\377\000T\000h\000e\000\040\000L\000i\000n\000e\000a\000r\000\040\000I\000n\000t\000e\000r\000p\000o\000l\000a\000t\000i\000o\000n\000\040\000O\000p\000e\000r\000a\000t\000o\000r}{section.9.4}% 47
\BOOKMARK [2][-]{subsection.9.4.2}{\376\377\000N\000e\000u\000r\000a\000l\000\040\000N\000e\000t\000w\000o\000r\000k\000s\000\040\000t\000o\000\040\000a\000p\000p\000r\000o\000x\000i\000m\000a\000t\000e\000\040\000t\000h\000e\000\040\000`\0003\003\001\0009\000`\0004\0002\000`\000"\003\007\0006\0001\0003\000A\000`\000`\0004\0005\000`\0004\0007\000`\000"\0006\0000\0003\000A\000L\000i\000n\000\040\000o\000p\000e\000r\000a\000t\000o\000r}{section.9.4}% 48
\BOOKMARK [1][-]{section.9.5}{\376\377\000N\000e\000u\000r\000a\000l\000\040\000n\000e\000t\000w\000o\000r\000k\000\040\000a\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000\040\000o\000f\000\040\0001\000-\000d\000i\000m\000e\000n\000s\000i\000o\000n\000a\000l\000\040\000f\000u\000n\000c\000t\000i\000o\000n\000s\000.}{chapter.9}% 49
\BOOKMARK [1][-]{section.9.6}{\376\377\000`\0003\003\001\0009\000`\0004\0002\000`\000"\003\007\0006\0001\0003\000A\000`\000`\0004\0005\000`\0004\0007\000`\000"\0006\0000\0003\000A\000T\000r\000p\000h\000\040\000a\000n\000d\000\040\000n\000e\000u\000r\000a\000l\000\040\000n\000e\000t\000w\000o\000r\000k\000\040\000a\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000s\000\040\000f\000o\000r\000\040\000t\000h\000e\000\040\000t\000r\000a\000p\000e\000z\000o\000i\000d\000a\000l\000\040\000r\000u\000l\000e\000.}{chapter.9}% 50
\BOOKMARK [1][-]{section.9.7}{\376\377\000L\000i\000n\000e\000a\000r\000\040\000i\000n\000t\000e\000r\000p\000o\000l\000a\000t\000i\000o\000n\000\040\000f\000o\000r\000\040\000m\000u\000l\000t\000i\000-\000d\000i\000m\000e\000n\000s\000i\000o\000n\000a\000l\000\040\000f\000u\000n\000c\000t\000i\000o\000n\000s}{chapter.9}% 51
\BOOKMARK [2][-]{subsection.9.7.1}{\376\377\000T\000h\000e\000\040\000`\0003\003\001\0009\000`\0004\0002\000`\000"\003\007\0006\0001\0003\000A\000`\000`\0004\0005\000`\0004\0007\000`\000"\0006\0000\0003\000A\000N\000r\000m\000d\0001\000\040\000a\000n\000d\000\040\000`\0003\003\001\0009\000`\0004\0002\000`\000"\003\007\0006\0001\0003\000A\000`\000`\0004\0005\000`\0004\0007\000`\000"\0006\0000\0003\000A\000M\000x\000m\000d\000\040\000n\000e\000t\000w\000o\000r\000k\000s}{section.9.7}% 52
\BOOKMARK [2][-]{subsection.9.7.2}{\376\377\000T\000h\000e\000\040\000`\0003\003\001\0009\000`\0004\0002\000`\000"\003\007\0006\0001\0003\000A\000`\000`\0004\0005\000`\0004\0007\000`\000"\0006\0000\0003\000A\000M\000x\000m\000d\000\040\000n\000e\000u\000r\000a\000l\000\040\000n\000e\000t\000w\000o\000r\000k\000\040\000a\000n\000d\000\040\000m\000a\000x\000i\000m\000u\000m\000\040\000c\000o\000n\000v\000o\000l\000u\000t\000i\000o\000n\000s\000\040}{section.9.7}% 53
\BOOKMARK [2][-]{subsection.9.7.3}{\376\377\000L\000i\000p\000s\000c\000h\000i\000t\000z\000\040\000f\000u\000n\000c\000t\000i\000o\000n\000\040\000a\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000s}{section.9.7}% 54
\BOOKMARK [2][-]{subsection.9.7.4}{\376\377\000E\000x\000p\000l\000i\000c\000i\000t\000\040\000A\000N\000N\000\040\000a\000p\000p\000r\000o\000x\000i\000m\000a\000t\000i\000o\000n\000s\000\040}{section.9.7}% 55
\BOOKMARK [-1][-]{part.3}{\376\377\000I\000I\000I\000\040\000A\000\040\000d\000e\000e\000p\000-\000l\000e\000a\000r\000n\000i\000n\000g\000\040\000s\000o\000l\000u\000t\000i\000o\000n\000\040\000f\000o\000r\000\040\000u\000\040\000a\000n\000d\000\040\000B\000r\000o\000w\000n\000i\000a\000n\000\040\000m\000o\000t\000i\000o\000n\000s}{}% 56
\BOOKMARK [0][-]{chapter.10}{\376\377\000A\000N\000N\000\040\000r\000e\000p\000r\000e\000s\000e\000n\000t\000a\000t\000i\000o\000n\000s\000\040\000o\000f\000\040\000B\000r\000o\000w\000n\000i\000a\000n\000\040\000M\000o\000t\000i\000o\000n\000\040\000M\000o\000n\000t\000e\000\040\000C\000a\000r\000l\000o}{part.3}% 57
\BOOKMARK [0][-]{section*.3}{\376\377\000A\000p\000p\000e\000n\000d\000i\000c\000e\000s}{part.3}% 58

Binary file not shown.

Binary file not shown.

View File

@ -1,22 +1,20 @@
\include{preamble}
\include{commands}
\title{Artificial Neural Networks Applied to Stochastic Monte Carlo as a Way to Approximate Modified Heat Equations, and Their Associated Parameters, Depths, and Accuracies.}
\author{Shakil Rafi}
\begin{document}
\maketitle
\include{front_matter}
\tableofcontents
\part{On Convergence of Brownian Motion Monte Carlo}
\setcounter{page}{1}
\include{Introduction}
\include{Brownian_motion_monte_carlo}
\include{u_visc_sol}
\include{brownian_motion_monte_carlo_non_linear_case}
%\include{brownian_motion_monte_carlo_non_linear_case}
\part{A Structural Description of Artificial Neural Networks}
@ -34,11 +32,14 @@
\include{conclusions-further-research}
\nocite{*}
%\nocite{*}
\singlespacing
\bibliography{main.bib}
\bibliographystyle{apa}
\include{back_matter}
\include{appendices}
\end{document}

View File

@ -1,58 +0,0 @@
\contentsline {part}{I\hspace {1em}On Convergence of Brownian Motion Monte Carlo}{4}{part.1}%
\contentsline {chapter}{\numberline {1}Introduction.}{5}{chapter.1}%
\contentsline {section}{\numberline {1.1}Notation, Definitions \& Basic notions.}{5}{section.1.1}%
\contentsline {subsection}{\numberline {1.1.1}Norms and Inner Product}{5}{subsection.1.1.1}%
\contentsline {subsection}{\numberline {1.1.2}Probability Space and Brownian Motion}{6}{subsection.1.1.2}%
\contentsline {subsection}{\numberline {1.1.3}Lipschitz and Related Notions}{9}{subsection.1.1.3}%
\contentsline {subsection}{\numberline {1.1.4}Kolmogorov Equations}{10}{subsection.1.1.4}%
\contentsline {subsection}{\numberline {1.1.5}Linear Algebra Notation and Definitions}{12}{subsection.1.1.5}%
\contentsline {subsection}{\numberline {1.1.6}$O$-type notation and function growth}{13}{subsection.1.1.6}%
\contentsline {subsection}{\numberline {1.1.7}The Iverson Bracket}{15}{subsection.1.1.7}%
\contentsline {chapter}{\numberline {2}Brownian Motion Monte Carlo}{16}{chapter.2}%
\contentsline {section}{\numberline {2.1}Brownian Motion Preliminaries}{16}{section.2.1}%
\contentsline {section}{\numberline {2.2}Monte Carlo Approximations}{20}{section.2.2}%
\contentsline {section}{\numberline {2.3}Bounds and Convergence}{21}{section.2.3}%
\contentsline {chapter}{\numberline {3}That $u$ is a viscosity solution}{30}{chapter.3}%
\contentsline {section}{\numberline {3.1}Some Preliminaries}{30}{section.3.1}%
\contentsline {section}{\numberline {3.2}Viscosity Solutions}{34}{section.3.2}%
\contentsline {section}{\numberline {3.3}Solutions, characterization, and computational bounds to the Kolmogorov backward equations}{53}{section.3.3}%
\contentsline {chapter}{\numberline {4}Brownian motion Monte Carlo of the non-linear case}{59}{chapter.4}%
\contentsline {part}{II\hspace {1em}A Structural Description of Artificial Neural Networks}{61}{part.2}%
\contentsline {chapter}{\numberline {5}Introduction and Basic Notions about Neural Networks}{62}{chapter.5}%
\contentsline {section}{\numberline {5.1}The Basic Definition of ANNs}{62}{section.5.1}%
\contentsline {section}{\numberline {5.2}Composition and extensions of ANNs}{66}{section.5.2}%
\contentsline {subsection}{\numberline {5.2.1}Composition}{66}{subsection.5.2.1}%
\contentsline {subsection}{\numberline {5.2.2}Extensions}{68}{subsection.5.2.2}%
\contentsline {section}{\numberline {5.3}Parallelization of ANNs}{68}{section.5.3}%
\contentsline {section}{\numberline {5.4}Affine Linear Transformations as ANNs}{72}{section.5.4}%
\contentsline {section}{\numberline {5.5}Sums of ANNs}{75}{section.5.5}%
\contentsline {subsection}{\numberline {5.5.1}Neural Network Sum Properties}{76}{subsection.5.5.1}%
\contentsline {section}{\numberline {5.6}Linear Combinations of ANNs}{83}{section.5.6}%
\contentsline {section}{\numberline {5.7}Neural Network Diagrams}{93}{section.5.7}%
\contentsline {chapter}{\numberline {6}ANN Product Approximations}{95}{chapter.6}%
\contentsline {section}{\numberline {6.1}Approximation for simple products}{95}{section.6.1}%
\contentsline {subsection}{\numberline {6.1.1}The $\prd $ network}{106}{subsection.6.1.1}%
\contentsline {section}{\numberline {6.2}Higher Approximations}{111}{section.6.2}%
\contentsline {subsection}{\numberline {6.2.1}The $\tun $ Neural Network}{112}{subsection.6.2.1}%
\contentsline {subsection}{\numberline {6.2.2}The $\pwr $ Neural Networks}{114}{subsection.6.2.2}%
\contentsline {subsection}{\numberline {6.2.3}The $\tay $ neural network}{123}{subsection.6.2.3}%
\contentsline {subsection}{\numberline {6.2.4}Neural network approximations for $e^x$.}{128}{subsection.6.2.4}%
\contentsline {chapter}{\numberline {7}A modified Multi-Level Picard and associated neural network}{129}{chapter.7}%
\contentsline {chapter}{\numberline {8}Some categorical ideas about neural networks}{132}{chapter.8}%
\contentsline {chapter}{\numberline {9}ANN first approximations}{133}{chapter.9}%
\contentsline {section}{\numberline {9.1}Activation Function as Neural Networks}{133}{section.9.1}%
\contentsline {section}{\numberline {9.2}ANN Representations for One-Dimensional Identity}{134}{section.9.2}%
\contentsline {section}{\numberline {9.3}Modulus of Continuity}{142}{section.9.3}%
\contentsline {section}{\numberline {9.4}Linear Interpolation of real-valued functions}{143}{section.9.4}%
\contentsline {subsection}{\numberline {9.4.1}The Linear Interpolation Operator}{143}{subsection.9.4.1}%
\contentsline {subsection}{\numberline {9.4.2}Neural Networks to approximate the $\lin $ operator}{144}{subsection.9.4.2}%
\contentsline {section}{\numberline {9.5}Neural network approximation of 1-dimensional functions.}{148}{section.9.5}%
\contentsline {section}{\numberline {9.6}$\trp ^h$ and neural network approximations for the trapezoidal rule.}{151}{section.9.6}%
\contentsline {section}{\numberline {9.7}Linear interpolation for multi-dimensional functions}{154}{section.9.7}%
\contentsline {subsection}{\numberline {9.7.1}The $\nrm ^d_1$ and $\mxm ^d$ networks}{154}{subsection.9.7.1}%
\contentsline {subsection}{\numberline {9.7.2}The $\mxm ^d$ neural network and maximum convolutions }{160}{subsection.9.7.2}%
\contentsline {subsection}{\numberline {9.7.3}Lipschitz function approximations}{164}{subsection.9.7.3}%
\contentsline {subsection}{\numberline {9.7.4}Explicit ANN approximations }{167}{subsection.9.7.4}%
\contentsline {part}{III\hspace {1em}A deep-learning solution for $u$ and Brownian motions}{169}{part.3}%
\contentsline {chapter}{\numberline {10}ANN representations of Brownian Motion Monte Carlo}{170}{chapter.10}%
\contentsline {chapter}{Appendices}{180}{section*.3}%

View File

@ -33,7 +33,7 @@ We now look at neural networks in the context of multi-level Picard iterations.
\begin{lemma}
Let $\Theta = \lp \bigcup_{n\in \N} \Z^n \rp$, $d,M \in \N$, $T\in \lp 0,\infty \rp$, $f \in C \lp \R, \R \rp$, $g \in C \lp \R^d, \R \rp$, $\mathsf{F}, \mathsf{G} \in \neu$ satisfy that $\real_{\rect} \lp \mathsf{F} \rp = f$ and $\real_{\rect} \lp \mathsf{G} \rp = g$, let $\mathfrak{u}^\theta \in \lb 0,1 \rb$, $\theta \in \Theta$, and $\mathcal{U}^\theta: \lb 0,T \rb \rightarrow \lb 0,T \rb$, $\theta \in \Theta$, satisfy for all $t \in \lb 0,T \rb$, $\theta \in \Theta$ that $\mathcal{U}^\theta_t = t+(T-t)\mathfrak{u}^\theta$, let $\mathcal{W}^\theta: \lb 0,T \rb \rightarrow \R^d$, $\theta \in \Theta$, be functions, for every $\theta \in \Theta$, $t\in \lb 0,T\rb$, $s \in \lb t,T\rb$, let $\mathcal{Y}^\theta_{t,s} \in \R^d$ satisfy $\mathcal{Y}^\theta_{t,s} = \mathcal{W}^\theta_s - \mathcal{W}^\theta_t$ and let $U^\theta_n: \lb 0,T\rb \times \R^d \rightarrow \R$, $n\in \N_0$, $\theta \in \Theta$, satisfy for all $\theta \in \Theta$, $n\in \N_0$, $t\in \lb 0,T\rb$, $x\in \R^d$ that:
\begin{align}
U^\theta_n \lp t,x\rp &= \frac{\mathbbm{1}_\N\lp n \rp}{M^n} \lb \sum^{M^n}_{k=1} g \lp x + \mathcal{Y}^{(\theta,0,-k)}_{t,T}\rp\rb \nonumber\\
&U^\theta_n \lp t,x\rp = \frac{\mathbbm{1}_\N\lp n \rp}{M^n} \lb \sum^{M^n}_{k=1} g \lp x + \mathcal{Y}^{(\theta,0,-k)}_{t,T}\rp\rb \nonumber\\
&+ \sum^{n-1}_{i=0} \frac{T-t}{M^{n-i}} \lb \sum^{M^{n-i}}_{k=1} \lp \lp f \circ U^{(\theta,i,k)}_i\rp - \mathbbm{1}_\N \lp i \rp \lp f \circ U^{(\theta,-i,k)}_{\max \{ i-1,0\}} \rp \rp \lp \mathcal{U}^{(\theta,i,k)}_t,x+ \mathcal{Y}^{(\theta,i,k)}_{t,\mathcal{U}_t^{(\theta,i,k)}}\rp\rb
\end{align}
it is then the case that:
@ -46,7 +46,7 @@ We now look at neural networks in the context of multi-level Picard iterations.
% \boxplus_{\mathbb{I}} \lb \boxplus^{n-1}_{i=0,\mathbb{I}} \lb \lp \frac{T-t}{M^{n-i}} \rp \circledast \lp \boxplus^{M^{n-i}}_{k=1,\mathbb{I}} \lp \lp \mathsf{F} \bullet \mathsf{U}^{(\theta,i,k}_{i, \mathcal{U}_t^{(\theta,i,k)} \rp \rp \rp
% \end{align}
\begin{align}
\mathsf{U}^\theta_{n,t} &= \lb \bigoplus^{M^n}_{k=1} \lp \frac{1}{M^n} \circledast \lp \mathsf{G}\bullet \aff_{\mathbb{I}_d, \mathcal{Y}^{(\theta,0,-k)}_{t,T}} \rp \rp \rb \nonumber \\
&\mathsf{U}^\theta_{n,t} \\&= \lb \bigoplus^{M^n}_{k=1} \lp \frac{1}{M^n} \circledast \lp \mathsf{G}\bullet \aff_{\mathbb{I}_d, \mathcal{Y}^{(\theta,0,-k)}_{t,T}} \rp \rp \rb \nonumber \\
&\boxplus_{\mathbb{I}} \lb \boxplus^{n-1}_{i=0,\mathbb{I}} \lb \lp \frac{T-t}{M^{n-i}}\rp \circledast \lp \boxplus^{M^{n-i}}_{k=1,\mathbb{I}}\lp \lp \mathsf{F} \bullet \mathsf{U}^{(\theta,i,k)}_{i,\mathcal{U}_t^{(\theta,i,k)}} \rp \bullet \aff_{\mathbb{I}_d, \mathcal{Y}^{(\theta,i,k)}_{t,\mathcal{U}_t^{(\theta,i,k)}}} \rp\rp \rb\rb \nonumber\\
&\boxplus_{\mathbb{I}} \lb \boxplus^{n-1}_{i=0,\mathbb{I}} \lb \lp \frac{(t-T)\mathbbm{1}_\N \lp i \rp}{M^{n-i}}\rp \circledast\lp \boxplus^{M^{n-i}}_{k=1,\mathbb{I}} \lp \lp \mathsf{F} \bullet \mathsf{U}^{(\theta,-i,k)}_{\max \{i-1,0\}, \mathcal{U}_t^{(\theta,i,k)}}\rp \bullet \aff_{\mathbb{I}_d,\mathcal{Y}^{(\theta,i,k)}_{t,\mathcal{U}_t^{(\theta,i,k)}}} \rp \rp\rb \rb
\end{align}
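The recursion for $U^\theta_n$ above also has a direct Monte Carlo reading. The following is a minimal sketch, not the author's code, assuming a scalar state ($d=1$), that the families $\mathfrak{u}^\theta$ and $\mathcal{W}^\theta$ are realized by drawing fresh independent uniform and Gaussian samples, and that $f$ and $g$ are ordinary callables; it illustrates the numerical estimator $U^\theta_n(t,x)$, not the network $\mathsf{U}^\theta_{n,t}$ constructed in the lemma.
\begin{lstlisting}[language=Python]
import numpy as np

def mlp(n, M, t, x, T, f, g, rng):
    # Sketch of U^theta_n(t, x): the index families theta are replaced by
    # fresh independent draws from `rng` (an assumption, not the source).
    if n == 0:
        return 0.0  # U^theta_0 = 0 since the indicator 1_N(0) vanishes
    # terminal term: mean of g(x + (W_T - W_t)) over M^n Brownian increments
    increments = rng.normal(0.0, np.sqrt(T - t), size=M ** n)
    out = float(np.mean([g(x + w) for w in increments]))
    # multilevel correction terms for i = 0, ..., n - 1
    for i in range(n):
        acc = 0.0
        for _ in range(M ** (n - i)):
            r = t + (T - t) * rng.uniform()        # U^theta_t = t + (T - t) u^theta
            y = rng.normal(0.0, np.sqrt(r - t))    # Brownian increment over [t, r]
            acc += f(mlp(i, M, r, x + y, T, f, g, rng))
            if i > 0:  # coupled lower level: same (r, y), independent recursion
                acc -= f(mlp(i - 1, M, r, x + y, T, f, g, rng))
        out += (T - t) * acc / (M ** (n - i))
    return out

rng = np.random.default_rng(0)
print(mlp(2, 3, 0.0, 0.5, 1.0, f=np.sin, g=lambda z: z ** 2, rng=rng))
\end{lstlisting}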

View File

@ -799,11 +799,11 @@ Affine neural networks present an important class of neural networks. By virtue
\lp \begin{bmatrix}
W_2 & 0\\
0 & W'_2
\end{bmatrix} \begin{bmatrix}
\end{bmatrix} ,\begin{bmatrix}
b_2 \\
b_2'
\end{bmatrix} \rp \right.,..., \nonumber
\left. \lp \begin{bmatrix}
\end{bmatrix} \rp \right.,..., \nonumber \\
&\left. \lp \begin{bmatrix}
\mathbb{I}_{\out(\nu_2)} \: \mathbb{I}_{\out(\nu_2)}
\end{bmatrix}\begin{bmatrix}
W_L & 0 \\
@ -1329,7 +1329,7 @@ Affine neural networks present an important class of neural networks. By virtue
\end{proof}
\begin{lemma}\label{5.6.9}
Let $L \in \N$, $u,v \in \Z$ with $u\leqslant v$. Let $c_u, c_{u+1},...,c_v \in \R$. $\nu_u, \nu_{u+1},...,\nu_v, \mu \in \neu$, $B_u, B_{u+1},...,B_v \in \R^{\inn(\nu_u)}$, $\act \in C\lp \R, \R \rp$, satisfy for all $j \in \N \cap [u,v]$ that $L = \max_{i\in \N \cap \lb u,v \rb} \dep(\nu_i)$, $\inn(\nu_j) = \inn(\nu_u)$, $\out(\nu_j) = \inn(\mathfrak{I})= \out(\mathfrak{I})$, $\hid(\mathfrak{I}) = 1$, $\real_{\act} (\mathfrak{I}) = \mathbb{I}_\R$, and that:
Let $L \in \N$, $u,v \in \Z$ with $u\leqslant v$. Let $c_u, c_{u+1},...,c_v \in \R$, $\nu_u, \nu_{u+1},...,\nu_v, \mathfrak{I}, \mu \in \neu$, $B_u, B_{u+1},...,B_v \in \R^{\inn(\nu_u)}$, $\act \in C\lp \R, \R \rp$, satisfy for all $j \in \N \cap [u,v]$ that $L = \max_{i\in \N \cap \lb u,v \rb}\\ \dep(\nu_i)$, $\inn(\nu_j) = \inn(\nu_u)$, $\out(\nu_j) = \inn(\mathfrak{I})= \out(\mathfrak{I})$, $\hid(\mathfrak{I}) = 1$, $\real_{\act} (\mathfrak{I}) = \mathbb{I}_\R$, and that:
\begin{align}
\mu = \dplus^v_{i = u, \mathfrak{I}} \lp c_i \triangleright \lp \nu_i \bullet \aff_{\mathbb{I}_{\inn(\nu_i)},B_i} \rp \rp
\end{align}
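Read pointwise, and under the conventions assumed here (that $\aff_{\mathbb{I}_{\inn(\nu_i)},B_i}$ instantiates to $x \mapsto x + B_i$, that $c_i \triangleright$ scales the output, and that the $\mathfrak{I}$-sum adds realizations), the network $\mu$ above should realize $x \mapsto \sum_{i=u}^{v} c_i \cdot \lp \real_{\act} \lp \nu_i \rp \rp \lp x + B_i \rp$. A minimal sketch of that pointwise reading, with the instantiated networks stood in for by plain callables (names illustrative only, not from the source):
\begin{lstlisting}[language=Python]
import numpy as np

def realize_linear_combination(nets, coeffs, shifts, x):
    # Pointwise reading assumed above: x -> sum_i c_i * nu_i(x + B_i),
    # where nets[i] plays the role of the instantiation R_act(nu_i).
    return sum(c * net(x + b) for net, c, b in zip(nets, coeffs, shifts))

# toy usage with two stand-in "networks"
nu_1 = lambda z: np.maximum(z, 0.0)          # ReLU-type realization
nu_2 = lambda z: np.maximum(2.0 * z, 0.0)
print(realize_linear_combination([nu_1, nu_2], [1.0, -0.5], [0.0, 1.0], 3.0))
\end{lstlisting}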

View File

@ -0,0 +1 @@
\relax

View File

@ -17,6 +17,9 @@
bottom=1in
}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage[]{soul}
\usepackage[]{bbm}
\usepackage[]{cancel}
@ -71,7 +74,7 @@
numbersep = 5pt,
breakatwhitespace=false,
tabsize=4,
morekeywords={install.packages, library, ggplot, aes, geom_bar}
morekeywords={install.packages, library, ggplot, aes, geom_bar, source}
}
\usepackage{fontspec}

View File

@ -1,4 +1,5 @@
\relax
\providecommand\zref@newlabel[2]{}
\providecommand\hyper@newdestlabel[2]{}
\citation{crandall_lions}
\citation{Beck_2021}
@ -6,194 +7,66 @@
\citation{Ito1946}
\citation{Beck_2021}
\citation{BHJ21}
\@writefile{toc}{\contentsline {chapter}{\numberline {3}That $u$ is a viscosity solution}{30}{chapter.3}\protected@file@percent }
\@writefile{toc}{\contentsline {chapter}{\numberline {3}That $u$ is a Viscosity Solution}{35}{chapter.3}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {3.1}Some Preliminaries}{30}{section.3.1}\protected@file@percent }
\newlabel{lemma:2.7}{{3.1.1}{30}{}{theorem.3.1.1}{}}
\newlabel{lemma:2.7@cref}{{[lemma][1][3,1]3.1.1}{[1][30][]30}}
\@writefile{toc}{\contentsline {section}{\numberline {3.1}Some Preliminaries}{35}{section.3.1}\protected@file@percent }
\newlabel{lemma:2.7}{{3.1.1}{35}{}{theorem.3.1.1}{}}
\newlabel{lemma:2.7@cref}{{[lemma][1][3,1]3.1.1}{[1][35][]35}}
\citation{karatzas1991brownian}
\citation{karatzas1991brownian}
\newlabel{lem:3.4}{{3.1.2}{32}{}{theorem.3.1.2}{}}
\newlabel{lem:3.4@cref}{{[lemma][2][3,1]3.1.2}{[1][32][]32}}
\newlabel{2.13}{{3.1.7}{32}{}{equation.3.1.7}{}}
\newlabel{2.13@cref}{{[equation][7][3,1]3.1.7}{[1][32][]32}}
\newlabel{2.14}{{3.1.8}{32}{}{equation.3.1.8}{}}
\newlabel{2.14@cref}{{[equation][8][3,1]3.1.8}{[1][32][]32}}
\newlabel{lem:3.4}{{3.1.2}{37}{}{theorem.3.1.2}{}}
\newlabel{lem:3.4@cref}{{[lemma][2][3,1]3.1.2}{[1][37][]37}}
\newlabel{2.13}{{3.7}{37}{}{equation.3.1.7}{}}
\newlabel{2.13@cref}{{[equation][7][3,1]3.7}{[1][37][]37}}
\citation{da_prato_zabczyk_2002}
\newlabel{2.14}{{3.8}{38}{}{equation.3.1.8}{}}
\newlabel{2.14@cref}{{[equation][8][3,1]3.8}{[1][37][]38}}
\newlabel{(2.19)}{{3.10}{38}{Some Preliminaries}{equation.3.1.10}{}}
\newlabel{(2.19)@cref}{{[equation][10][3,1]3.10}{[1][38][]38}}
\citation{karatzas1991brownian}
\newlabel{(2.19)}{{3.1.10}{33}{Some Preliminaries}{equation.3.1.10}{}}
\newlabel{(2.19)@cref}{{[equation][10][3,1]3.1.10}{[1][33][]33}}
\citation{da_prato_zabczyk_2002}
\@writefile{toc}{\contentsline {section}{\numberline {3.2}Viscosity Solutions}{34}{section.3.2}\protected@file@percent }
\newlabel{sumofusc}{{3.2.3.1}{34}{}{corollary.3.2.3.1}{}}
\newlabel{sumofusc@cref}{{[corollary][1][]3.2.3.1}{[1][34][]34}}
\newlabel{neglsc}{{3.2.3.2}{35}{}{corollary.3.2.3.2}{}}
\newlabel{neglsc@cref}{{[corollary][2][]3.2.3.2}{[1][35][]35}}
\newlabel{negdegel}{{3.2.6}{36}{}{theorem.3.2.6}{}}
\newlabel{negdegel@cref}{{[lemma][6][3,2]3.2.6}{[1][36][]36}}
\newlabel{def:viscsubsolution}{{3.2.7}{36}{Viscosity subsolutions}{theorem.3.2.7}{}}
\newlabel{def:viscsubsolution@cref}{{[definition][7][3,2]3.2.7}{[1][36][]36}}
\newlabel{def:viscsupsolution}{{3.2.8}{36}{Viscosity supersolutions}{theorem.3.2.8}{}}
\newlabel{def:viscsupsolution@cref}{{[definition][8][3,2]3.2.8}{[1][36][]36}}
\newlabel{def:viscsolution}{{3.2.9}{37}{Viscosity solution}{theorem.3.2.9}{}}
\newlabel{def:viscsolution@cref}{{[definition][9][3,2]3.2.9}{[1][37][]37}}
\newlabel{maxviscosity}{{3.2.10}{37}{}{theorem.3.2.10}{}}
\newlabel{maxviscosity@cref}{{[lemma][10][3,2]3.2.10}{[1][37][]37}}
\newlabel{ungeq0}{{3.2.11}{38}{}{theorem.3.2.11}{}}
\newlabel{ungeq0@cref}{{[lemma][11][3,2]3.2.11}{[1][38][]38}}
\newlabel{limitofun}{{3.2.13}{38}{}{equation.3.2.13}{}}
\newlabel{limitofun@cref}{{[equation][13][3,2]3.2.13}{[1][38][]38}}
\newlabel{hessungeq0}{{3.2.15}{38}{}{equation.3.2.15}{}}
\newlabel{hessungeq0@cref}{{[equation][15][3,2]3.2.15}{[1][38][]38}}
\newlabel{phieps}{{3.2.16}{38}{Viscosity Solutions}{equation.3.2.16}{}}
\newlabel{phieps@cref}{{[equation][16][3,2]3.2.16}{[1][38][]38}}
\newlabel{maxphiu}{{3.2.20}{39}{Viscosity Solutions}{equation.3.2.20}{}}
\newlabel{maxphiu@cref}{{[equation][20][3,2]3.2.20}{[1][39][]39}}
\newlabel{unleq0}{{3.2.11.1}{40}{}{corollary.3.2.11.1}{}}
\newlabel{unleq0@cref}{{[corollary][1][]3.2.11.1}{[1][40][]40}}
\newlabel{limitofun}{{3.2.27}{41}{}{equation.3.2.27}{}}
\newlabel{limitofun@cref}{{[equation][27][3,2]3.2.27}{[1][41][]41}}
\newlabel{viscsolutionvn}{{3.2.28}{41}{}{equation.3.2.28}{}}
\newlabel{viscsolutionvn@cref}{{[equation][28][3,2]3.2.28}{[1][41][]41}}
\newlabel{hessungeq0}{{3.2.29}{41}{}{equation.3.2.29}{}}
\newlabel{hessungeq0@cref}{{[equation][29][3,2]3.2.29}{[1][41][]41}}
\newlabel{hgeq0}{{3.2.30}{41}{Viscosity Solutions}{equation.3.2.30}{}}
\newlabel{hgeq0@cref}{{[equation][30][3,2]3.2.30}{[1][41][]41}}
\newlabel{unneq0}{{3.2.11.2}{42}{}{corollary.3.2.11.2}{}}
\newlabel{unneq0@cref}{{[corollary][2][]3.2.11.2}{[1][42][]42}}
\newlabel{absq}{{3.2.12}{42}{}{theorem.3.2.12}{}}
\newlabel{absq@cref}{{[lemma][12][3,2]3.2.12}{[1][42][]42}}
\citation{karatzas1991brownian}
\newlabel{ugoesto0}{{3.2.13}{43}{}{theorem.3.2.13}{}}
\newlabel{ugoesto0@cref}{{[lemma][13][3,2]3.2.13}{[1][43][]43}}
\newlabel{limsupis0}{{3.2.39}{43}{}{equation.3.2.39}{}}
\newlabel{limsupis0@cref}{{[equation][39][3,2]3.2.39}{[1][43][]43}}
\newlabel{xnasintuvxn}{{3.2.40}{43}{}{equation.3.2.40}{}}
\newlabel{xnasintuvxn@cref}{{[equation][40][3,2]3.2.40}{[1][43][]43}}
\newlabel{lipformun}{{3.2.42}{43}{Viscosity Solutions}{equation.3.2.42}{}}
\newlabel{lipformun@cref}{{[equation][42][3,2]3.2.42}{[1][43][]43}}
\newlabel{expofxisbounded}{{3.2.43}{43}{Viscosity Solutions}{equation.3.2.43}{}}
\newlabel{expofxisbounded@cref}{{[equation][43][3,2]3.2.43}{[1][43][]43}}
\newlabel{mathcalxn-mathclx0}{{3.2.44}{44}{Viscosity Solutions}{equation.3.2.44}{}}
\newlabel{mathcalxn-mathclx0@cref}{{[equation][44][3,2]3.2.44}{[1][43][]44}}
\newlabel{2.19}{{3.2.14}{45}{}{theorem.3.2.14}{}}
\newlabel{2.19@cref}{{[lemma][14][3,2]3.2.14}{[1][45][]45}}
\newlabel{2.59}{{3.2.50}{46}{}{equation.3.2.50}{}}
\newlabel{2.59@cref}{{[equation][50][3,2]3.2.50}{[1][46][]46}}
\newlabel{2.60}{{3.2.51}{46}{}{equation.3.2.51}{}}
\newlabel{2.60@cref}{{[equation][51][3,2]3.2.51}{[1][46][]46}}
\newlabel{2.62}{{3.2.53}{46}{Viscosity Solutions}{equation.3.2.53}{}}
\newlabel{2.62@cref}{{[equation][53][3,2]3.2.53}{[1][46][]46}}
\newlabel{2.63}{{3.2.54}{46}{Viscosity Solutions}{equation.3.2.54}{}}
\newlabel{2.63@cref}{{[equation][54][3,2]3.2.54}{[1][46][]46}}
\newlabel{2.64}{{3.2.55}{46}{Viscosity Solutions}{equation.3.2.55}{}}
\newlabel{2.64@cref}{{[equation][55][3,2]3.2.55}{[1][46][]46}}
\citation{BHJ21}
\citation{Beck_2021}
\newlabel{2.65}{{3.2.56}{47}{Viscosity Solutions}{equation.3.2.56}{}}
\newlabel{2.65@cref}{{[equation][56][3,2]3.2.56}{[1][46][]47}}
\newlabel{2.66}{{3.2.57}{47}{Viscosity Solutions}{equation.3.2.57}{}}
\newlabel{2.66@cref}{{[equation][57][3,2]3.2.57}{[1][47][]47}}
\newlabel{ungn}{{3.2.58}{47}{Viscosity Solutions}{equation.3.2.58}{}}
\newlabel{ungn@cref}{{[equation][58][3,2]3.2.58}{[1][47][]47}}
\newlabel{u0gn}{{3.2.59}{47}{Viscosity Solutions}{equation.3.2.59}{}}
\newlabel{u0gn@cref}{{[equation][59][3,2]3.2.59}{[1][47][]47}}
\newlabel{2.70}{{3.2.61}{47}{Viscosity Solutions}{equation.3.2.61}{}}
\newlabel{2.70@cref}{{[equation][61][3,2]3.2.61}{[1][47][]47}}
\newlabel{unu0}{{3.2.62}{48}{Viscosity Solutions}{equation.3.2.62}{}}
\newlabel{unu0@cref}{{[equation][62][3,2]3.2.62}{[1][48][]48}}
\newlabel{2.73}{{3.2.64}{48}{Viscosity Solutions}{equation.3.2.64}{}}
\newlabel{2.73@cref}{{[equation][64][3,2]3.2.64}{[1][48][]48}}
\newlabel{2.74}{{3.2.65}{48}{Viscosity Solutions}{equation.3.2.65}{}}
\newlabel{2.74@cref}{{[equation][65][3,2]3.2.65}{[1][48][]48}}
\newlabel{2.76}{{3.2.67}{49}{Viscosity Solutions}{equation.3.2.67}{}}
\newlabel{2.76@cref}{{[equation][67][3,2]3.2.67}{[1][48][]49}}
\newlabel{2.79}{{3.2.71}{49}{}{equation.3.2.71}{}}
\newlabel{2.79@cref}{{[equation][71][3,2]3.2.71}{[1][49][]49}}
\newlabel{2.81}{{3.2.73}{50}{}{equation.3.2.73}{}}
\newlabel{2.81@cref}{{[equation][73][3,2]3.2.73}{[1][50][]50}}
\newlabel{2.82}{{3.2.74}{50}{Viscosity Solutions}{equation.3.2.74}{}}
\newlabel{2.82@cref}{{[equation][74][3,2]3.2.74}{[1][50][]50}}
\newlabel{2.84}{{3.2.76}{50}{Viscosity Solutions}{equation.3.2.76}{}}
\newlabel{2.84@cref}{{[equation][76][3,2]3.2.76}{[1][50][]50}}
\citation{Beck_2021}
\citation{Beck_2021}
\newlabel{2.86}{{3.2.78}{51}{Viscosity Solutions}{equation.3.2.78}{}}
\newlabel{2.86@cref}{{[equation][78][3,2]3.2.78}{[1][50][]51}}
\newlabel{2.88}{{3.2.80}{51}{Viscosity Solutions}{equation.3.2.80}{}}
\newlabel{2.88@cref}{{[equation][80][3,2]3.2.80}{[1][51][]51}}
\newlabel{2.89}{{3.2.81}{51}{Viscosity Solutions}{equation.3.2.81}{}}
\newlabel{2.89@cref}{{[equation][81][3,2]3.2.81}{[1][51][]51}}
\newlabel{2.90}{{3.2.83}{52}{Viscosity Solutions}{equation.3.2.83}{}}
\newlabel{2.90@cref}{{[equation][83][3,2]3.2.83}{[1][52][]52}}
\newlabel{2.93}{{3.2.85}{52}{Viscosity Solutions}{equation.3.2.85}{}}
\newlabel{2.93@cref}{{[equation][85][3,2]3.2.85}{[1][52][]52}}
\newlabel{2.95}{{3.2.87}{53}{Viscosity Solutions}{equation.3.2.87}{}}
\newlabel{2.95@cref}{{[equation][87][3,2]3.2.87}{[1][52][]53}}
\@writefile{toc}{\contentsline {section}{\numberline {3.3}Solutions, characterization, and computational bounds to the Kolmogorov backward equations}{53}{section.3.3}\protected@file@percent }
\newlabel{thm:3.21}{{3.3.1}{53}{Existence and characterization of $u_d$}{theorem.3.3.1}{}}
\newlabel{thm:3.21@cref}{{[theorem][1][3,3]3.3.1}{[1][53][]53}}
\newlabel{3.3.1}{{3.3.1}{53}{Existence and characterization of $u_d$}{equation.3.3.1}{}}
\newlabel{3.3.1@cref}{{[equation][1][3,3]3.3.1}{[1][53][]53}}
\newlabel{3.102}{{3.3.2}{53}{Existence and characterization of $u_d$}{equation.3.3.2}{}}
\newlabel{3.102@cref}{{[equation][2][3,3]3.3.2}{[1][53][]53}}
\newlabel{lem:3.19}{{3.3.1.1}{54}{}{corollary.3.3.1.1}{}}
\newlabel{lem:3.19@cref}{{[corollary][1][]3.3.1.1}{[1][54][]54}}
\newlabel{3.3.2}{{3.3.2}{54}{}{theorem.3.3.2}{}}
\newlabel{3.3.2@cref}{{[lemma][2][3,3]3.3.2}{[1][54][]54}}
\newlabel{3.3.7}{{3.3.7}{54}{}{equation.3.3.7}{}}
\newlabel{3.3.7@cref}{{[equation][7][3,3]3.3.7}{[1][54][]54}}
\newlabel{3.3.12}{{3.3.12}{55}{Solutions, characterization, and computational bounds to the Kolmogorov backward equations}{equation.3.3.12}{}}
\newlabel{3.3.12@cref}{{[equation][12][3,3]3.3.12}{[1][55][]55}}
\newlabel{3.3.13}{{3.3.13}{55}{Solutions, characterization, and computational bounds to the Kolmogorov backward equations}{equation.3.3.13}{}}
\newlabel{3.3.13@cref}{{[equation][13][3,3]3.3.13}{[1][55][]55}}
\newlabel{3.3.15}{{3.3.15}{56}{Solutions, characterization, and computational bounds to the Kolmogorov backward equations}{equation.3.3.15}{}}
\newlabel{3.3.15@cref}{{[equation][15][3,3]3.3.15}{[1][56][]56}}
\citation{bhj20}
\newlabel{3.3.5}{{3.3.5}{57}{}{theorem.3.3.5}{}}
\newlabel{3.3.5@cref}{{[claim][5][3,3]3.3.5}{[1][57][]57}}
\newlabel{3.2.21}{{3.3.18}{57}{Solutions, characterization, and computational bounds to the Kolmogorov backward equations}{equation.3.3.18}{}}
\newlabel{3.2.21@cref}{{[equation][18][3,3]3.3.18}{[1][57][]57}}
\newlabel{3.3.20}{{3.3.20}{58}{Solutions, characterization, and computational bounds to the Kolmogorov backward equations}{equation.3.3.20}{}}
\newlabel{3.3.20@cref}{{[equation][20][3,3]3.3.20}{[1][58][]58}}
\@setckpt{u_visc_sol}{
\setcounter{page}{59}
\setcounter{equation}{20}
\setcounter{enumi}{3}
\setcounter{enumii}{0}
\setcounter{enumiii}{0}
\setcounter{enumiv}{0}
\setcounter{footnote}{0}
\setcounter{mpfootnote}{0}
\setcounter{part}{1}
\setcounter{chapter}{3}
\setcounter{section}{3}
\setcounter{subsection}{0}
\setcounter{subsubsection}{0}
\setcounter{paragraph}{0}
\setcounter{subparagraph}{0}
\setcounter{figure}{0}
\setcounter{table}{0}
\setcounter{@pps}{0}
\setcounter{@ppsavesec}{0}
\setcounter{@ppsaveapp}{0}
\setcounter{AM@survey}{0}
\setcounter{parentequation}{0}
\setcounter{section@level}{1}
\setcounter{Item}{55}
\setcounter{Hfootnote}{0}
\setcounter{bookmark@seq@number}{18}
\setcounter{NAT@ctr}{0}
\setcounter{ALG@line}{0}
\setcounter{ALG@rem}{0}
\setcounter{ALG@nested}{0}
\setcounter{ALG@Lnr}{1}
\setcounter{ALG@blocknr}{1}
\setcounter{ALG@storecount}{0}
\setcounter{ALG@tmpcounter}{0}
\setcounter{lstnumber}{1}
\setcounter{theorem}{5}
\setcounter{corollary}{0}
\setcounter{lstlisting}{0}
}
\@writefile{toc}{\contentsline {section}{\numberline {3.2}Viscosity Solutions}{40}{section.3.2}\protected@file@percent }
\newlabel{sumofusc}{{3.2.3.1}{40}{}{corollary.3.2.3.1}{}}
\newlabel{sumofusc@cref}{{[corollary][1][]3.2.3.1}{[1][40][]40}}
\newlabel{neglsc}{{3.2.3.2}{40}{}{corollary.3.2.3.2}{}}
\newlabel{neglsc@cref}{{[corollary][2][]3.2.3.2}{[1][40][]40}}
\newlabel{negdegel}{{3.2.6}{41}{}{theorem.3.2.6}{}}
\newlabel{negdegel@cref}{{[lemma][6][3,2]3.2.6}{[1][41][]41}}
\newlabel{def:viscsubsolution}{{3.2.7}{42}{Viscosity subsolutions}{theorem.3.2.7}{}}
\newlabel{def:viscsubsolution@cref}{{[definition][7][3,2]3.2.7}{[1][42][]42}}
\newlabel{def:viscsupsolution}{{3.2.8}{42}{Viscosity supersolutions}{theorem.3.2.8}{}}
\newlabel{def:viscsupsolution@cref}{{[definition][8][3,2]3.2.8}{[1][42][]42}}
\newlabel{def:viscsolution}{{3.2.9}{43}{Viscosity solution}{theorem.3.2.9}{}}
\newlabel{def:viscsolution@cref}{{[definition][9][3,2]3.2.9}{[1][43][]43}}
\newlabel{maxviscosity}{{3.2.10}{43}{}{theorem.3.2.10}{}}
\newlabel{maxviscosity@cref}{{[lemma][10][3,2]3.2.10}{[1][43][]43}}
\newlabel{ungeq0}{{3.2.11}{44}{}{theorem.3.2.11}{}}
\newlabel{ungeq0@cref}{{[lemma][11][3,2]3.2.11}{[1][44][]44}}
\newlabel{limitofun}{{3.13}{44}{}{equation.3.2.13}{}}
\newlabel{limitofun@cref}{{[equation][13][3,2]3.13}{[1][44][]44}}
\newlabel{hessungeq0}{{3.15}{44}{}{equation.3.2.15}{}}
\newlabel{hessungeq0@cref}{{[equation][15][3,2]3.15}{[1][44][]44}}
\newlabel{phieps}{{3.16}{44}{Viscosity Solutions}{equation.3.2.16}{}}
\newlabel{phieps@cref}{{[equation][16][3,2]3.16}{[1][44][]44}}
\newlabel{maxphiu}{{3.20}{45}{Viscosity Solutions}{equation.3.2.20}{}}
\newlabel{maxphiu@cref}{{[equation][20][3,2]3.20}{[1][45][]45}}
\newlabel{unleq0}{{3.2.11.1}{47}{}{corollary.3.2.11.1}{}}
\newlabel{unleq0@cref}{{[corollary][1][]3.2.11.1}{[1][47][]47}}
\newlabel{limitofun}{{3.27}{47}{}{equation.3.2.27}{}}
\newlabel{limitofun@cref}{{[equation][27][3,2]3.27}{[1][47][]47}}
\newlabel{viscsolutionvn}{{3.28}{47}{}{equation.3.2.28}{}}
\newlabel{viscsolutionvn@cref}{{[equation][28][3,2]3.28}{[1][47][]47}}
\newlabel{hessungeq0}{{3.29}{47}{}{equation.3.2.29}{}}
\newlabel{hessungeq0@cref}{{[equation][29][3,2]3.29}{[1][47][]47}}
\newlabel{hgeq0}{{3.30}{47}{Viscosity Solutions}{equation.3.2.30}{}}
\newlabel{hgeq0@cref}{{[equation][30][3,2]3.30}{[1][47][]47}}
\newlabel{unneq0}{{3.2.11.2}{48}{}{corollary.3.2.11.2}{}}
\newlabel{unneq0@cref}{{[corollary][2][]3.2.11.2}{[1][48][]48}}
\newlabel{absq}{{3.2.12}{49}{}{theorem.3.2.12}{}}
\newlabel{absq@cref}{{[lemma][12][3,2]3.2.12}{[1][49][]49}}
\newlabel{ugoesto0}{{3.2.13}{49}{}{theorem.3.2.13}{}}
\newlabel{ugoesto0@cref}{{[lemma][13][3,2]3.2.13}{[1][49][]49}}
\newlabel{limsupis0}{{3.39}{49}{}{equation.3.2.39}{}}
\newlabel{limsupis0@cref}{{[equation][39][3,2]3.39}{[1][49][]49}}
\citation{karatzas1991brownian}

View File

@ -576,7 +576,7 @@ Taken together these prove the corollary.
\begin{align}\label{limsupis0}
\limsup_{n \rightarrow \infty} \lb \sup_{t\in[0,T]} \sup_{x\in \mathcal{O}} \lp \left\|\mu_n(t,x) - \mu_0(t,x)\right\|_E + \left\|\sigma_n(t,x)-\sigma_0(t,x)\right\|_F \rp \rb = 0
\end{align}
Let $\lp \Omega, \mathcal{F}, \mathbb{R} \rp$ be a stochastic basis and let $W: [0,T] \times \Omega \rightarrow \R^m$ be a standard $(\mathbb{F}_t)_{t\in [0,T]}$-Brownian motion for every $t\in [0,T]$, $x \in \mathcal{O}$, let $\mathcal{X}^{t,x} = (\mathcal{X}^{t,x}_s)_{s\in [t,T]}: [t,T] \times \Omega \rightarrow \R^d$ be an $(\mathbb{F}_s)_{s\in [t,T]}$ adapted stochastic process with continuous sample paths, satisfying for all $s \in [t, T]$ we have $\mathbb{P}$-a.s.
Let $\lp \Omega, \mathcal{F}, \mathbb{P} \rp$ be a stochastic basis and let $W: [0,T] \times \Omega \rightarrow \R^m$ be a standard\\ $(\mathbb{F}_t)_{t\in [0,T]}$-Brownian motion. For every $n \in \N_0$, $t\in [0,T]$, $x \in \mathcal{O}$, let $\mathcal{X}^{n,t,x} = (\mathcal{X}^{n,t,x}_s)_{s\in [t,T]}: [t,T] \times \Omega \rightarrow \R^d$ be an $(\mathbb{F}_s)_{s\in [t,T]}$-adapted stochastic process with continuous sample paths satisfying that for all $s \in [t, T]$ we have, $\mathbb{P}$-a.s.,
\begin{align}\label{xnasintuvxn}
\mathcal{X}^{n,t,x}_s = x + \int^s_t \mu_n(r,\mathcal{X}^{n,t,x}_r) dr + \int^s_t \sigma_n(r,\mathcal{X}^{n,t,x}_r) dW_r
\end{align}
@ -626,7 +626,7 @@ Taken together these prove the corollary.
\end{align}
However assumption (\ref{lipformun}) then gives us that for all $n\in \N$, $t \in [0,T]$, $s \in [t,T]$, and $x \in \mathcal{O}$ that:
\begin{align}
\E \lb \left\|\mathcal{X}^{n,t,x}_s - \mathcal{X}^{0,t,x}_s \right\|_E^2 \rb &\leqslant 4L^2(T+1) \int^s_t\E\lb \left\|\mathcal{X}^{n,t,x}_r-\mathcal{X}^{0,t,x}_r \right\|_E^2 \rb dr \nonumber \\
&\E \lb \left\|\mathcal{X}^{n,t,x}_s - \mathcal{X}^{0,t,x}_s \right\|_E^2 \rb \leqslant 4L^2(T+1) \int^s_t\E\lb \left\|\mathcal{X}^{n,t,x}_r-\mathcal{X}^{0,t,x}_r \right\|_E^2 \rb dr \nonumber \\
&+4T(T+1) \lb \sup_{r\in [0,T]}\sup_{y\in \R^d} \lp \left\| \mu_n(r,y) - \mu_0(r,y) \right\|_E^2 + \left\| \sigma_n(r,y) - \sigma_0(r,y) \right\|_F^2 \rp \rb \nonumber
\end{align}