|
|
@ -81,7 +81,7 @@ This proves Item (v) and hence the whole lemma.
|
|
|
|
\end{proof}
|
|
|
|
\end{proof}
|
|
|
|
\section{The $\mathsf{E}^{N,h,q,\ve}_n$ Neural Network}
|
|
|
|
\section{The $\mathsf{E}^{N,h,q,\ve}_n$ Neural Network}
|
|
|
|
\begin{lemma}[R\textemdash, 2023]\label{mathsfE}
|
|
|
|
\begin{lemma}[R\textemdash, 2023]\label{mathsfE}
|
|
|
|
Let $n, N\in \N$ and $h \in \lp 0,\infty\rp$. Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \rp$, satisfy that $\delta = \ve \lp 2^{q-1} +1\rp^{-1}$. Let $a\in \lp -\infty,\infty \rp$, $b \in \lb a, \infty \rp$. Let $f:[a,b] \rightarrow \R$ be continuous and have second derivatives almost everywhere in $\lb a,b \rb$. Let $a=x_0 \les x_1\les \cdots \les x_{N-1} \les x_N=b$ such that for all $i \in \{0,1,...,N\}$ it is the case that $h = \frac{b-a}{N}$, and $x_i = x_0+i\cdot h$. Let $x = \lb x_0 \: x_1\: \cdots \: x_N \rb$ and as such let $f\lp\lb x \rb_{*,*} \rp = \lb f(x_0) \: f(x_1)\: \cdots \: f(x_N) \rb$. Let $\mathsf{E}^{N,h,q,\ve}_{n} \in \neu$ be the neural network given by:
|
|
|
|
Let $n, N\in \N$ and $h \in \lp 0,\infty\rp$. Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \rp$, satisfy that $\delta = \ve \lp 2^{q-1} +1\rp^{-1}$. Let $a\in \lp -\infty,\infty \rp$, $b \in \lb a, \infty \rp$. Let $f:[a,b] \rightarrow \R$ be continuous and have second derivatives almost everywhere in $\lb a,b \rb$. Let $a=x_0 \les x_1\les \cdots \les x_{N-1} \les x_N=b$ such that for all $i \in \{0,1,...,N\}$ it is the case that $h = \frac{b-a}{N}$, and $x_i = x_0+i\cdot h$ . Let $x = \lb x_0 \: x_1\: \cdots \: x_N \rb$ and as such let $f\lp\lb x \rb_{*,*} \rp = \lb f(x_0) \: f(x_1)\: \cdots \: f(x_N) \rb$. Let $\mathsf{E}^{N,h,q,\ve}_{n} \in \neu$ be the neural network given by:
|
|
|
|
\begin{align}
|
|
|
|
\begin{align}
|
|
|
|
\mathsf{E}^{N,h,q,\ve}_n = \xpn_n^{q,\ve} \bullet \etr^{N,h}
|
|
|
|
\mathsf{E}^{N,h,q,\ve}_n = \xpn_n^{q,\ve} \bullet \etr^{N,h}
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
@ -506,7 +506,7 @@ Let $n, N,h\in \N$. Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \
|
|
|
|
\section{The $\mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}$ network}\label{UEX}
|
|
|
|
\section{The $\mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}$ network}\label{UEX}
|
|
|
|
\begin{lemma}[R\textemdash,2023]\label{UE-prop}
|
|
|
|
\begin{lemma}[R\textemdash,2023]\label{UE-prop}
|
|
|
|
|
|
|
|
|
|
|
|
Let $n, N,h\in \N$. Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \rp$, satisfy that $\delta = \ve \lp 2^{q-1} +1\rp^{-1}$. Let $a\in \lp -\infty,\infty \rp$, $b \in \lb a, \infty \rp$. Let $f:[a,b] \rightarrow \R$ be continuous and have second derivatives almost everywhere in $\lb a,b \rb$. Let $a=x_0 \les x_1\les \cdots \les x_{N-1} \les x_N=b$ such that for all $i \in \{0,1,...,N\}$ it is the case that $h = \frac{b-a}{N}$, and $x_i = x_0+i\cdot h$. Let $x = \lb x_0 \: x_1\: \cdots \: x_N \rb$ and as such let $f\lp\lb x \rb_{*,*} \rp = \lb f(x_0) \: f(x_1)\: \cdots \: f(x_N) \rb$. Let $\mathsf{E}^{\exp}_{n,h,q,\ve} \in \neu$ be the neural network given by:
|
|
|
|
Let $n, N,h\in \N$. Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \rp$, satisfy that $\delta = \ve \lp 2^{q-1} +1\rp^{-1}$. Let $a\in \lp -\infty,\infty \rp$, $b \in \lb a, \infty \rp$. Let $f:[a,b] \rightarrow \R$ be continuous and have second derivatives almost everywhere in $\lb a,b \rb$. Let $a=x_0 \les x_1\les \cdots \les x_{N-1} \les x_N=b$ such that for all $i \in \{0,1,...,N\}$ it is the case that $h = \frac{b-a}{N}$, and $x_i = x_0+i\cdot h$ . Let $x = \lb x_0 \: x_1\: \cdots \: x_N \rb$ and as such let $f\lp\lb x \rb_{*,*} \rp = \lb f(x_0) \: f(x_1)\: \cdots \: f(x_N) \rb$. Let $\mathsf{E}^{\exp}_{n,h,q,\ve} \in \neu$ be the neural network given by:
|
|
|
|
\begin{align}
|
|
|
|
\begin{align}
|
|
|
|
\mathsf{E}^{N,h,q,\ve}_n = \xpn_n^{q,\ve} \bullet \etr^{N,h}
|
|
|
|
\mathsf{E}^{N,h,q,\ve}_n = \xpn_n^{q,\ve} \bullet \etr^{N,h}
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
@ -619,7 +619,7 @@ Note that for a fixed $T \in \lp 0,\infty \rp$ it is the case that $u_d\lp t,x \
|
|
|
|
&\left| \exp \lp \int^T_t \lp \alpha_d \circ \mathcal{X}^{d,t,x}_{r,\omega_i} ds\rp \rp u_d\lp T,\mathcal{X}^{d,t,x}_{\omega_i}\rp - \real_{\rect}\lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp \right|\nonumber\\
|
|
|
|
&\left| \exp \lp \int^T_t \lp \alpha_d \circ \mathcal{X}^{d,t,x}_{r,\omega_i} ds\rp \rp u_d\lp T,\mathcal{X}^{d,t,x}_{\omega_i}\rp - \real_{\rect}\lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp \right|\nonumber\\
|
|
|
|
&\les 3\ve +2\ve \left| u_d\lp T,\mathcal{X}_{r,\omega_i}^{d,t,x}\rp\right|^q+2\ve \left| \exp \lp \int^b_afdx\rp\right|^q + \ve \left| \exp \lp \int^b_afdx\rp - \mathfrak{e}\right|^q -\mathfrak{e}u_d\lp T,\mathcal{X}^{d,t,x}_{r,\omega_i} \rp\nonumber
|
|
|
|
&\les 3\ve +2\ve \left| u_d\lp T,\mathcal{X}_{r,\omega_i}^{d,t,x}\rp\right|^q+2\ve \left| \exp \lp \int^b_afdx\rp\right|^q + \ve \left| \exp \lp \int^b_afdx\rp - \mathfrak{e}\right|^q -\mathfrak{e}u_d\lp T,\mathcal{X}^{d,t,x}_{r,\omega_i} \rp\nonumber
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
|
This completes the proof of the Lemma.
|
|
|
|
This completes the proof of the Lemma.
|
|
|
|
\end{proof}
|
|
|
|
\end{proof}
|
|
|
|
\begin{remark}
|
|
|
|
\begin{remark}
|
|
|
|
Diagrammatically, this can be represented as:
|
|
|
|
Diagrammatically, this can be represented as:
|
|
|
@ -695,9 +695,15 @@ Note that for a fixed $T \in \lp 0,\infty \rp$ it is the case that $u_d\lp t,x \
|
|
|
|
\end{tikzpicture}
|
|
|
|
\end{tikzpicture}
|
|
|
|
\end{center}
|
|
|
|
\end{center}
|
|
|
|
\end{remark}
|
|
|
|
\end{remark}
|
|
|
|
\section{The $\mathsf{UES}$ network}
|
|
|
|
\section{The $\mathsf{UES}^{N,h,q,\ve}_{n,\mathsf{G}_d,\Omega,\fn}$ network}
|
|
|
|
|
|
|
|
\begin{definition}[The Kahane--Khintchine Constant]
|
|
|
|
|
|
|
|
Let $p,q \in \lp 0,\infty\rp$. We will then denote by $\fK_{p,q}\in \lb 0,\infty\rb$ the extended real number given by:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
\fK_{p,q} = \sup \left\{ c \in \lb 0,\infty \rp : \lb \exists \text{ an }\R-\text{Banach Space} \rb \right\}
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
\end{definition}
|
|
|
|
\begin{lemma}\label{lem:sm_sum}
|
|
|
|
\begin{lemma}\label{lem:sm_sum}
|
|
|
|
Let $\nu_1,\nu_2,\hdots, \nu_n \in \neu$ such that for all $i \in \{1,2,\hdots, n\}$ it is the case that $\out\lp \nu_i\rp = 1$, and it is also the case that $\dep \lp \nu_1 \rp = \dep \lp \nu_2 \rp = \cdots =\dep \lp \nu_n\rp$. Let $x_1 \in \R^{\inn\lp \nu_1\rp},x_2 \in \R^{\inn\lp \nu_2\rp},\hdots, x_n \in \R^{\inn\lp \nu_n\rp}$ and $\fx \in \R^{\sum_{i=1}^n \inn \lp \nu_i\rp}$. It is then the case that we have that:
|
|
|
|
Let $\nu_1,\nu_2,\hdots, \nu_n \in \neu$ such that for all $i \in \{1,2,\hdots, n\}$ it is the case that $\out\lp \nu_i\rp = 1$, and it is also the case that $\dep \lp \nu_1 \rp = \dep \lp \nu_2 \rp = \cdots =\dep \lp \nu_n\rp$. Let $x_1 \in \R^{\inn\lp \nu_1\rp},x_2 \in \R^{\inn\lp \nu_2\rp},\hdots, x_n \in \R^{\inn\lp \nu_n\rp}$ and $\fx \in \R^{\sum_{i=1}^n \inn \lp \nu_i\rp}$. It is then the case that we have that:
|
|
|
|
\begin{align}
|
|
|
|
\begin{align}
|
|
|
|
\real_{\rect}\lp \sm_{n,1} \bullet \lb \boxminus_{i=1}^n \nu_i \rb \rp \lp \fx\rp = \sum^n_{i=1} \real_{\rect} \lp \nu_i\rp \lp x_i\rp
|
|
|
|
\real_{\rect}\lp \sm_{n,1} \bullet \lb \boxminus_{i=1}^n \nu_i \rb \rp \lp \fx\rp = \sum^n_{i=1} \real_{\rect} \lp \nu_i\rp \lp x_i\rp
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
@ -738,6 +744,9 @@ Note that for a fixed $T \in \lp 0,\infty \rp$ it is the case that $u_d\lp t,x \
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
|
This proves the inductive case and hence the Lemma.
|
|
|
|
This proves the inductive case and hence the Lemma.
|
|
|
|
\end{proof}
|
|
|
|
\end{proof}
|
|
|
|
|
|
|
|
\begin{lemma}
|
|
|
|
|
|
|
|
Let, $\lp \Omega, \mathcal{F}, \mathbb{P}\rp$ be a probability space and let $\mathcal{X}: \Omega \rightarrow \R^d$ be a random variable with $\E\lb\mathcal{X}\rb = \mu < \infty$, and probability density function $\ff_{\cX }$. Let $g: \R^d \rightarrow \R$ be a measurable continuous function. It is then the case that
|
|
|
|
|
|
|
|
\end{lemma}
|
|
|
|
|
|
|
|
|
|
|
|
\begin{lemma}[R\textemdash, 2024, Approximants for Brownian Motion]
|
|
|
|
\begin{lemma}[R\textemdash, 2024, Approximants for Brownian Motion]
|
|
|
|
|
|
|
|
|
|
|
@ -763,7 +772,7 @@ Let $t \in \lp 0,\infty\rp$ and $T \in \lp t,\infty\rp$. Let $\lp \Omega, \mathc
|
|
|
|
|
|
|
|
|
|
|
|
Furthermore, let $\mathsf{UE}^{N,h,q,\ve}_{n, \mathsf{G}_d}\subsetneq \neu$ be neural networks given by:
|
|
|
|
Furthermore, let $\mathsf{UE}^{N,h,q,\ve}_{n, \mathsf{G}_d}\subsetneq \neu$ be neural networks given by:
|
|
|
|
\begin{align}
|
|
|
|
\begin{align}
|
|
|
|
\mathsf{UE}^{N,h,q,\ve}_{n,\mathsf{G}_d} = \prd^{q,\ve} \bullet \lb \mathsf{E}^{N,h,q,\ve}_{n,h,q,\ve} \DDiamond \mathsf{G}_d \rb
|
|
|
|
\mathsf{UE}^{N,h,q,\ve}_{n,\mathsf{G}_d} = \prd^{q,\ve} \bullet \lb \mathsf{E}^{N,h,q,\ve}_{n} \DDiamond \mathsf{G}_d \rb
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
|
Furthermore, let $\mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i} \subsetneq \neu$ be neural networks given by:
|
|
|
|
Furthermore, let $\mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i} \subsetneq \neu$ be neural networks given by:
|
|
|
|
\begin{align}
|
|
|
|
\begin{align}
|
|
|
@ -781,9 +790,9 @@ Let $t \in \lp 0,\infty\rp$ and $T \in \lp t,\infty\rp$. Let $\lp \Omega, \mathc
|
|
|
|
\frac{q}{q-2} \lb \log_2 \lp \ve^{-1}\rp +q \rb +\dep \lp \mathsf{G}_d\rp-1 &:n = 0\\
|
|
|
|
\frac{q}{q-2} \lb \log_2 \lp \ve^{-1}\rp +q \rb +\dep \lp \mathsf{G}_d\rp-1 &:n = 0\\
|
|
|
|
\frac{q}{q-2} \lb \log_2 \lp \ve^{-1}\rp +q \rb +\max\left\{\dep \lp \mathsf{E}^{N,h,q,\ve}_{n}\rp,\dep \lp \mathsf{G}_d\rp\right\}-1 &:n \in \N\\
|
|
|
|
\frac{q}{q-2} \lb \log_2 \lp \ve^{-1}\rp +q \rb +\max\left\{\dep \lp \mathsf{E}^{N,h,q,\ve}_{n}\rp,\dep \lp \mathsf{G}_d\rp\right\}-1 &:n \in \N\\
|
|
|
|
\end{cases}$
|
|
|
|
\end{cases}$
|
|
|
|
\item It is also the case that:\begin{align}
|
|
|
|
\item It is also the case that:
|
|
|
|
\param \lp \mathsf{UES}^{N,h,q,\ve}_{n,\mathsf{G}_d,\Omega, \fn}\rp &\les \param \lp \prd^{q,\ve}\rp + 2\lp\max \left\{\param \lp \mathsf{E}^{N,h,q,\ve}_{n}\rp, \param \lp \mathsf{G}_d\rp \right\}\rp^2 \nonumber\\
|
|
|
|
\begin{align}
|
|
|
|
&+ 8 \max\left\{\lp 1+4n\rp, \wid_{\hid \lp \mathsf{G}_d\rp} \lp \mathsf{G}_d\rp \right\}\nonumber
|
|
|
|
\param \lp \mathsf{UES}^{N,h,q,\ve}_{n,\mathsf{G}_d,\Omega, \fn}\rp &\les \fn^2 \cdot \lb \frac{360q}{q-2} \lb \log_2 \lp \ve^{-1} \rp +q+1 \rb +324+ 48n\right. \nonumber\\ &\left. +24 \wid_{\hid\lp \mathsf{G}_d\rp}\lp \mathsf{G}_d\rp + 4\max \left\{\param \lp \mathsf{E}^{N,h,q,\ve}_{n}\rp, \param \lp \mathsf{G}_d\rp \right\} \rb
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
|
\item It is also the case that:
|
|
|
|
\item It is also the case that:
|
|
|
|
\begin{align}
|
|
|
|
\begin{align}
|
|
|
@ -805,7 +814,7 @@ Let $t \in \lp 0,\infty\rp$ and $T \in \lp t,\infty\rp$. Let $\lp \Omega, \mathc
|
|
|
|
|
|
|
|
|
|
|
|
Whence by Lemma \ref{comp_prop} it is the case that $\dep \lp \mathsf{UES}^{N,h,q,\ve}_{n,\mathsf{G}_d,\Omega,\fn} \rp = \dep \lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp$. This then proves Item (ii).
|
|
|
|
Whence by Lemma \ref{comp_prop} it is the case that $\dep \lp \mathsf{UES}^{N,h,q,\ve}_{n,\mathsf{G}_d,\Omega,\fn} \rp = \dep \lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp$. This then proves Item (ii).
|
|
|
|
|
|
|
|
|
|
|
|
Next, observe that each of the $\mathsf{UEX}$ networks has the same architecture by construction. Corollary \ref{cor:sameparal} then yields that:
|
|
|
|
Next, observe that each of the $\mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}$ networks has the same architecture for all $\omega_i \in \Omega$ by construction. Corollary \ref{cor:sameparal} then yields that:
|
|
|
|
\begin{align}
|
|
|
|
\begin{align}
|
|
|
|
\param \lp \boxminus_{i=1}^{\mathfrak{n}} \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i} \rp \les \mathfrak{n}^2\cdot \param \lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp
|
|
|
|
\param \lp \boxminus_{i=1}^{\mathfrak{n}} \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i} \rp \les \mathfrak{n}^2\cdot \param \lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
@ -825,27 +834,78 @@ Let $t \in \lp 0,\infty\rp$ and $T \in \lp t,\infty\rp$. Let $\lp \Omega, \mathc
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
|
Now observe that by the triangle inequality, we have that:
|
|
|
|
Now observe that by the triangle inequality, we have that:
|
|
|
|
\begin{align}
|
|
|
|
\begin{align}
|
|
|
|
&\left| \E \lb \exp \lp \int^T_t f\lp \mathcal{X}^{d,t,x}_{r,\Omega}\rp ds\rp u_d^T\lp \mathcal{X}^{d,t,x}_{r,\Omega}\rp\rb - \real_{\rect}\lp \mathsf{UES}^{N,h,q,\ve}_{n,\mathsf{G}_d,\Omega, \fn}\rp \right| \nonumber \\
|
|
|
|
&\left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp\rb - \real_{\rect}\lp \mathsf{UES}^{N,h,q,\ve}_{n,\mathsf{G}_d,\Omega, \fn}\rp \right| \label{big_eqn_lhs} \\
|
|
|
|
&=\left| \E \lb \exp \lp \int^T_t f\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds\rp u_d^T\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp \rb - \inst_{\rect}\lb \frac{1}{\mathfrak{n}} \triangleright\lp \sm_{\mathfrak{n},1}\bullet\lb \boxminus_{i=1}^{\mathfrak{n}} \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rb\rp \rb\right| \nonumber\\
|
|
|
|
&=\left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp \rb - \inst_{\rect}\lb \frac{1}{\mathfrak{n}} \triangleright\lp \sm_{\mathfrak{n},1}\bullet\lb \boxminus_{i=1}^{\mathfrak{n}} \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rb\rp \rb\right| \nonumber\\
|
|
|
|
&\les \left| \E \lb \exp \lp \int^T_t f\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds\rp u_d^T\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp \rb - \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u_d^T\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp\rb \rb \right|\nonumber \\
|
|
|
|
&\les \left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp \rb - \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f ds \rp \cdot \fu_d^T\lp x\rp\rb \rb \right|\label{big_eqn_rhs_summand_1} \\
|
|
|
|
&+\left| \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u_d^T\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp\rb \rb - \real_{\rect}\lb \frac{1}{\mathfrak{n}} \triangleright\lp \sm_{\mathfrak{n},1}\bullet\lb \boxminus_{i=1}^{\mathfrak{n}} \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rb\rp \rb\right| \nonumber \\
|
|
|
|
&+\left| \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \rp \cdot \fu_d^T\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rb \rb - \real_{\rect}\lb \frac{1}{\mathfrak{n}} \triangleright\lp \sm_{\mathfrak{n},1}\bullet\lb \boxminus_{i=1}^{\mathfrak{n}} \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rb\rp \rb\right| \label{big_eqn_lhs_summand_2}
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
|
Observe that by the triangle inequality, the absolute homogeneity condition for norms, the fact that the Brownian motions are independent of each other, Lemma \ref{lem:sm_sum}, the fact that $\mathfrak{n}\in \N$, the fact that the upper limit of error remains bounded by the same bound for all $\omega_i \in \Omega$, and Lemma \ref{sum_of_errors_of_stacking}, then renders the second summand as:
|
|
|
|
Observe that by the triangle inequality, the absolute homogeneity condition for norms, the fact that the Brownian motions are independent of each other, Lemma \ref{lem:sm_sum}, the fact that $\mathfrak{n}\in \N$, the fact that the upper limit of error remains bounded by the same bound for all $\omega_i \in \Omega$, and Lemma \ref{sum_of_errors_of_stacking}, then renders the second summand, (\ref{big_eqn_lhs_summand_2}), as:
|
|
|
|
\begin{align}
|
|
|
|
\begin{align}
|
|
|
|
&\left| \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u_d^T\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp\rb \rb - \real_{\rect}\lb \frac{1}{\mathfrak{n}} \triangleright\lp \sm_{\mathfrak{n},1}\bullet\lb \boxminus_{i=1}^{\mathfrak{n}} \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rb\rp\rb\right| \nonumber \\
|
|
|
|
&\left| \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u_d^T\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp\rb \rb - \real_{\rect}\lb \frac{1}{\mathfrak{n}} \triangleright\lp \sm_{\mathfrak{n},1}\bullet\lb \boxminus_{i=1}^{\mathfrak{n}} \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rb\rp\rb\right| \nonumber \\
|
|
|
|
&\les \left|\frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1} \exp \lp \int_t^T f\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u_d^T\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp \rb - \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lp \real_{\rect}\lb \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rb\rp \rb \right| \nonumber \\
|
|
|
|
&\les \left|\frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1} \exp \lp \int_t^T f\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u_d^T\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp \rb - \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lp \real_{\rect}\lb \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rb\rp \rb \right| \nonumber \\
|
|
|
|
&\les \cancel{\frac{1}{\mathfrak{n}} \sum^{\mathfrak{n}}_{i=1}}\left| \exp \lp \int^T_tf\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u^T_d\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp - \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\right| \nonumber\\
|
|
|
|
&\les \cancel{\frac{1}{\mathfrak{n}} \sum^{\mathfrak{n}}_{i=1}}\left| \exp \lp \int^T_tf\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u^T_d\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp - \real_{\rect}\lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i} \rp \right| \nonumber\\
|
|
|
|
&\les \left| \exp \lp \int^T_tf\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u^T_d\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp - \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\right| \nonumber\\
|
|
|
|
&\les \left| \exp \lp \int^T_tf\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u^T_d\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp - \real_{\rect}\lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp \right| \nonumber
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
This renders (\ref{big_eqn_lhs}) as:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
&\left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp\rb - \real_{\rect}\lp \mathsf{UES}^{N,h,q,\ve}_{n,\mathsf{G}_d,\Omega, \fn}\rp \right| \nonumber \\
|
|
|
|
|
|
|
|
&\les \left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp \rb - \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f ds \rp \cdot \fu_d^T\lp x\rp\rb \rb \right| \nonumber \\
|
|
|
|
|
|
|
|
&+\left| \exp \lp \int^T_tf\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u^T_d\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp - \real_{\rect}\lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp \right|
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
Taking the expectation on both sides of this inequality, and applying the linearity and monotonicity of expectation yields:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
&\E \lb \left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp\rb - \real_{\rect}\lp \mathsf{UES}^{N,h,q,\ve}_{n,\mathsf{G}_d,\Omega, \fn}\rp \right|\rb \label{big_eqn_stage_2_lhs}\\
|
|
|
|
|
|
|
|
&\les \E \lb \left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp \rb - \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f ds \rp \cdot \fu_d^T\lp x\rp\rb \rb \right|\rb \label{big_eqn_stage_2_rhs_1} \\
|
|
|
|
|
|
|
|
&+\E\lb \left| \exp \lp \int^T_tf\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u^T_d\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp - \real_{\rect}\lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp \right|\rb \label{big_eqn_stage_2_rhs_2}
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
Consider now the Lyapunov inequality applied to (\ref{big_eqn_stage_2_rhs_1}), which renders it as:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
&\E \lb \left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp \rb - \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f ds \rp \cdot \fu_d^T\lp x\rp\rb \rb \right|\rb \nonumber\\
|
|
|
|
|
|
|
|
&\les \lp \E \lb \left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp \rb - \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f ds \rp \cdot \fu_d^T\lp x\rp\rb \rb \right|^2\rb \rp^{\frac{1}{2}} \label{where_grohs_will be applied}
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
Then \cite[Corollary~2.6]{grohsetal} applied to (\ref{where_grohs_will be applied}) yields that:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
&\lp \E \lb \left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp \rb - \frac{1}{\mathfrak{n}}\lb \sum^{\mathfrak{n}}_{i=1}\lb \exp \lp \int_t^T f ds \rp \cdot \fu_d^T\lp x\rp\rb \rb \right|^2\rb \rp^{\frac{1}{2}} \nonumber\\
|
|
|
|
|
|
|
|
&\les 2\sqrt{\frac{1}{\fn}} \lp \E \lb \left| \E \lb \exp \lp \int^T_t f ds\rp \fu_d^T\lp x\rp \rb \right|^2\rb \rp^{\frac{1}{2}}
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
Looking back at (\ref{big_eqn_stage_2_rhs_2}), we see that the monotonicity and linearity of expectation tells us that:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
&\E\lb \left| \exp \lp \int^T_tf\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp ds \cdot u^T_d\lp \mathcal{X}^{d,t,x}_{r,\omega_i}\rp\rp - \real_{\rect}\lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp \right|\rb \\
|
|
|
|
|
|
|
|
&\les \E \lb 3\ve +2\ve \left| \fu^T_d\lp x\rp\right|^q+2\ve \left| \exp \lp \int^b_afdx\rp\right|^q + \ve \left| \exp \lp \int^b_afdx\rp - \mathfrak{e}\right|^q -\mathfrak{e}\fu^T_d\lp x \rp\rb \\
|
|
|
|
|
|
|
|
&\les 3\ve +2\ve \cdot\E\lb \left| \fu^T_d\lp x\rp\right|^q\rb + 2\ve \cdot \E \lb \left| \exp \lp \int^b_afdx\rp\right|^q\rb + \ve \cdot\E \lb \left| \exp \lp \int^b_a f dx\rp - \mathfrak{e}\right|^q\rb -\fe\cdot \E \lb \fu_d^T \lp x\rp\rb \nonumber\\
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
Note that:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
\E\lb \mathcal{X}^{d,t,x}_s\rb &= \E\lb x + \int^t_s \sqrt{2} d\mathcal{W}^d_r\rb \nonumber\\
|
|
|
|
|
|
|
|
&\les x + \sqrt{2}\cdot\E \lb \int^t_s d\mathcal{W}^d_r \rb \\
|
|
|
|
|
|
|
|
&= x + \sqrt{2}\cdot \E \lb \mathcal{W}^d_{t-s}\rb \\
|
|
|
|
|
|
|
|
&= x
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
Consider now:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
\va \lb \cX^{d,t,x}_s\rb &= \va \lb x + \int^t_s \sqrt{2}d\cW^d_r\rb \nonumber \\
|
|
|
|
|
|
|
|
&= \E \lb\lp x+\int^t_s\sqrt{2}d\cW^d_r - \E \lb x+\int^t_s\sqrt{2}d\cW^d_r\rb\rp^2\rb \nonumber\\
|
|
|
|
|
|
|
|
&=\E \lb\lp x+\int^t_s\sqrt{2}d\cW^d_r -x\rp^2\rb \nonumber \\
|
|
|
|
|
|
|
|
&=2\cdot \E\lb \lp \int^t_s d\cW_r^d\rp^2\rb \nonumber\\
|
|
|
|
|
|
|
|
&=2\cdot \E \lb \lp \cW^d_{t-s}\rp^2\rb
|
|
|
|
\end{align}
|
|
|
|
\end{align}
|
|
|
|
% Note that by Lemma \ref{iobm} each of the $\mathcal{X}^{d,t,x}_{r,\omega_i}$ are pairwise independent of each other for all $i \in \{1,2,\hdots,\mathfrak{n}\}$. Note also that by Definition \ref{def:brown_motion} it is the case, for all $\omega_i \in \Omega$ that $\mathcal{X}^{d,t,x}_{T,\omega_i} \sim \norm \lp \mymathbb{0}_d, \diag_d(T) \rp$
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Note for the first summand that it is in $\mathcal{O}\lp \frac{1}{\sqrt{\mathfrak{n}}}\rp$. Notice that both $f$ and $\fu^T_d$ are continuous functions for $d\in \N$. Note also that $F:[t,T] \rightarrow \R$ defined as:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
F(\fx) \coloneqq \int_t^{\fx} f\lp x\rp dx
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
is continuous on $\lb t,T\rb$. Thus, notice that \cite[Theorem~2.1]{rio_moment_2009} with $k$
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
\end{proof}
|
|
|
|
\end{proof}
|
|
|
|
|
|
|
|
Note now that:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
\va \lb \cW^d_{t-s}\rb &= \E \lb \lp \cW_{t-s}^d\rp^2\rb - \E \lb \cW^d_{t-s}\rb^2 \nonumber \\
|
|
|
|
|
|
|
|
\E\lb \lp \cW^d_{t-s}\rp^2\rb &= \lp t-s \rp\mathbb{I}_d \\
|
|
|
|
|
|
|
|
2\cdot \E\lb \lp \cW^d_{t-s}\rp^2\rb &= 2\lp t-s\rp\mathbb{I}_d
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
Now note that the $\cW^d_r$ are standard Brownian motions, and so their expectation and variance are $\mymathbb{0}_d$ and $\mathbb{I}_d$ respectively. Whence it is the case that the probability density function for $\cW_{t-s}^d$ is:
|
|
|
|
|
|
|
|
\begin{align}
|
|
|
|
|
|
|
|
\lp 2\pi\rp^{-\frac{d}{2}}\lp t-s\rp^{-\frac{1}{2}}\exp \lp \frac{-1}{2(t-s)}\mymathbb{e}_{1,d}\cdot \lb x \rb_*^2\rp
|
|
|
|
|
|
|
|
\end{align}
|
|
|
|
|
|
|
|
However $\cX^{d,t,x}_s$ is a shifted normal distribution
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
\begin{remark}
|
|
|
|
\begin{remark}
|
|
|
|
Note that diagrammatically, this can be represented as in figure below.
|
|
|
|
Note that diagrammatically, this can be represented as in figure below.
|
|
|
|
\begin{figure}[h]
|
|
|
|
\begin{figure}[h]
|
|
|
@ -1017,6 +1077,8 @@ is continuous on $\lb t,T\rb$. Thus , notice that \cite[Theorem~2.1]{rio_moment_
|
|
|
|
\caption{Neural network diagram for the $\mathsf{UES}$ network.}
|
|
|
|
\caption{Neural network diagram for the $\mathsf{UES}$ network.}
|
|
|
|
\end{figure}
|
|
|
|
\end{figure}
|
|
|
|
\end{remark}
|
|
|
|
\end{remark}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
\begin{remark}
|
|
|
|
\begin{remark}
|
|
|
|
It may be helpful to think of this as a very crude form of ensembling.
|
|
|
|
It may be helpful to think of this as a very crude form of ensembling.
|
|
|
|
\end{remark}
|
|
|
|
\end{remark}
|
|
|
|