Added the MC network

This commit is contained in:
Shakil Rafi 2024-02-19 16:27:30 -06:00
parent a09828e5e8
commit 102638ab05
4 changed files with 37 additions and 36 deletions

View File

@ -367,17 +367,17 @@ We say that $f \in \Theta(g(x))$ if there exists $c_1,c_2,x_0 \in \lp 0,\infty\r
We will stipulate that when concatenating vectors as $x_1 \frown x_2$, $x_1$ is on top, and $x_2$ is at the bottom.
\end{remark}
\begin{corollary}\label{sum_of_frown_frown_of_sum}
Let $m_1,m_2,n_1,n_2 \in \N$ and let $x \in \R^{m_1}$, $y \in \R^{n_1}$, $\fx\in \R^{m_2}$, and $\fy \in \R^{n_2}$. It is then the case that $\lb x \frown \fx\rb+\lb y \frown \fy\rb = \lb x+y\rb\frown \lb \fx +\fy\rb$.
Let $m_1,m_2,n_1,n_2 \in \N$, such that $m_1 = n_1$, $m_2=n_2$, and let $x \in \R^{m_1}$, $y \in \R^{n_1}$, $\fx\in \R^{m_2}$, and $\fy \in \R^{n_2}$. It is then the case that $\lb x \frown \fx\rb+\lb y \frown \fy\rb = \lb x+y\rb\frown \lb \fx +\fy\rb$.
\end{corollary}
\begin{proof}
This follows straightforwardly from the fact that:
\begin{align}
\lb x \frown \fx \rb + \lb y + \fy\rb = \begin{bmatrix}
\lb x \frown \fx \rb + \lb y \frown \fy\rb = \begin{bmatrix}
x_1 \\ x_2 \\ \vdots \\ x_{m_1} \\ \fx_1 \\ \fx_2 \\ \vdots \\ \fx_{m_2}
\end{bmatrix} + \begin{bmatrix}
y_1 \\ y_2 \\ \vdots \\ y_{n_1} \\ \fy_1\\ \fy_2 \\ \vdots \\ \fy_{n_2}
\end{bmatrix} = \begin{bmatrix}
x_1+y_1 \\ x_2 + y_2 \\ \vdots \\ x_{m_1+n+1} \\ \fx_1+\fy_1 \\ \fx_2 + \fy_2 \\ \vdots \\ \fx_{m_2} + \fy_{n_2}
x_1+y_1 \\ x_2 + y_2 \\ \vdots \\ x_{m_1}+y_{n_1} \\ \fx_1+\fy_1 \\ \fx_2 + \fy_2 \\ \vdots \\ \fx_{m_2} + \fy_{n_2}
\end{bmatrix} = \lb x+y\rb\frown \lb \fx +\fy\rb
\end{align}
\end{proof}

View File

@ -709,7 +709,7 @@ This completes the proof.
% \end{align}
% This completes the proof of the lemma.
%\end{proof}
\section{Linear Interpolation for Multi-Dimensional Functions}
\section{Maximum Convolution Approximations for Multi-Dimensional Functions}
\subsection{The $\nrm^d_1$ Networks}
\begin{definition}[The $\nrm_1^d$ neural network]
We denote by $\lp \nrm_1^d \rp _{d\in \N} \subseteq \neu$ the family of neural networks that satisfy:
@ -1043,13 +1043,13 @@ We will call the approximant $\max_{i \in \{0,1,\hdots, N\}}\{ f_i\}$, the \text
\end{align}
It is then the case that:
\begin{enumerate}[label = (\roman*)]
\item $\inn \lp \mathsf{MC} \rp = d$
\item $\out\lp \mathsf{MC} \rp = 1$
\item $\hid \lp \mathsf{MC} \rp = \left\lceil \log_2 \lp N \rp \right\rceil +1$
\item $\wid_1 \lp \mathsf{MC} \rp = 2dN$
\item for all $i \in \{ 2,3,...\}$ we have $\wid_1 \lp \mathsf{MC} \rp \les 3 \left\lceil \frac{N}{2^{i-1}} \right\rceil$
\item it holds for all $x \in \R^d$ that $\lp \real_{\rect} \lp \mathsf{MC} \rp \rp \lp x \rp = \max_{i \in \{1,2,...,N\}} \lp y_i - L \left\| x-x_i \right\|_1\rp$
\item it holds that $\param \lp \mathsf{MC} \rp \les \left\lceil \lp \frac{2}{3}d^2+3d\rp \lp 1+\frac{1}{2}^{2\lp \left\lceil \log_2\lp d\rp\right\rceil+1 \rp}\rp + 1 \right\rceil + 7N^2d^2 + 3\left\lceil \frac{N}{2}\right\rceil \cdot 2dN$
\item $\inn \lp \mathsf{MC}^{N,d}_{x,y} \rp = d$
\item $\out\lp \mathsf{MC}^{N,d}_{x,y} \rp = 1$
\item $\hid \lp \mathsf{MC}^{N,d}_{x,y} \rp = \left\lceil \log_2 \lp N \rp \right\rceil +1$
\item $\wid_1 \lp \mathsf{MC}^{N,d}_{x,y} \rp = 2dN$
\item for all $i \in \{ 2,3,...\}$ we have $\wid_i \lp \mathsf{MC}^{N,d}_{x,y} \rp \les 3 \left\lceil \frac{N}{2^{i-1}} \right\rceil$
\item it holds for all $x \in \R^d$ that $\lp \real_{\rect} \lp \mathsf{MC}^{N,d}_{x,y} \rp \rp \lp x \rp = \max_{i \in \{1,2,...,N\}} \lp y_i - L \left\| x-x_i \right\|_1\rp$
\item it holds that $\param \lp \mathsf{MC}^{N,d}_{x,y} \rp \les \left\lceil \lp \frac{2}{3}d^2+3d\rp \lp 1+\frac{1}{2}^{2\lp \left\lceil \log_2\lp d\rp\right\rceil+1 \rp}\rp + 1 \right\rceil + 7N^2d^2 + 3\left\lceil \frac{N}{2}\right\rceil \cdot 2dN$
\end{enumerate}
\end{lemma}
\begin{proof}
@ -1057,7 +1057,7 @@ We will call the approximant $\max_{i \in \{0,1,\hdots, N\}}\{ f_i\}$, the \text
\begin{align}
\mathsf{X} = \aff_{-L\mathbb{I}_N,y} \bullet \lp \lb \boxminus_{i=1}^N \mathsf{S}_i \rb \rp \bullet \cpy_{N,d}
\end{align}
Note that (\ref{9.7.20}) and Lemma \ref{comp_prop} tells us that $\out \lp \R \rp = \out \lp \mxm^N \rp = 1$ and $\inn \lp \mathsf{MC} \rp = \inn \lp \cpy_{N,d} \rp =d $. This proves Items (i)--(ii). Next observe that since it is the case that $\hid \lp \cpy_{N,d} \rp$ and $\hid \lp \nrm^d_1 \rp = 1$, Lemma \ref{comp_prop} then tells us that:
Note that (\ref{9.7.20}) and Lemma \ref{comp_prop} tell us that $\out \lp \mathsf{MC}^{N,d}_{x,y} \rp = \out \lp \mxm^N \rp = 1$ and $\inn \lp \mathsf{MC}^{N,d}_{x,y} \rp = \inn \lp \cpy_{N,d} \rp =d $. This proves Items (i)--(ii). Next observe that since it is the case that $\hid \lp \cpy_{N,d} \rp = 0$ and $\hid \lp \nrm^d_1 \rp = 1$, Lemma \ref{comp_prop} then tells us that:
\begin{align}
\hid \lp \mathsf{X} \rp = \hid \lp\aff_{-L\mathbb{I}_N,y} \rp + \hid \lp \boxminus_{i=1}^N \mathsf{S}_i\rp + \hid \lp \cpy_{N,d} \rp = 1
\end{align}
@ -1069,19 +1069,19 @@ We will call the approximant $\max_{i \in \{0,1,\hdots, N\}}\{ f_i\}$, the \text
Note next that Lemma \ref{comp_prop} and \cite[Proposition~2.20]{grohs2019spacetime} tells us that:
\begin{align}\label{8.3.33}
\wid_1 \lp \mathsf{MC} \rp = \wid_1 \lp \mathsf{X} \rp = \wid_1 \lp \boxminus^N_{i=1} \mathsf{S}_i\rp = \sum^N_{i=1} \wid_1 \lp \mathsf{S}_i \rp = \sum^N_{i=1} \wid_1 \lp \nrm^d_1 \rp = 2dN
\wid_1 \lp \mathsf{MC}^{N,d}_{x,y} \rp = \wid_1 \lp \mathsf{X} \rp = \wid_1 \lp \boxminus^N_{i=1} \mathsf{S}_i\rp = \sum^N_{i=1} \wid_1 \lp \mathsf{S}_i \rp = \sum^N_{i=1} \wid_1 \lp \nrm^d_1 \rp = 2dN
\end{align}
This establishes Item (iv).
Next observe that the fact that $\hid \lp \mathsf{X} \rp=1$, Lemma \ref{comp_prop} and Lemma \ref{9.7.4} tells us that for all $i \in \{2,3,...\}$ it is the case that:
\begin{align}
\wid_i \lp \mathsf{MC} \rp = \wid_{i-1} \lp \mxm^N \rp \les 3 \left\lceil \frac{N}{2^{i-1}} \right\rceil
\wid_i \lp \mathsf{MC}^{N,d}_{x,y} \rp = \wid_{i-1} \lp \mxm^N \rp \les 3 \left\lceil \frac{N}{2^{i-1}} \right\rceil
\end{align}
This establishes Item (v).
Next observe that Lemma \ref{9.7.2} and Lemma \ref{5.3.3} tells us that for all $x \in \R^d$, $i \in \{1,2,...,N\}$ it holds that:
\begin{align}
\lp \real_{\rect} \lp \mathsf{MC} \rp \rp \lp x \rp - \lp \real_{\rect}\lp \nrm^d_1 \rp \circ \real_{\rect}\lp \aff_{\mathbb{I}_d,-x_i} \rp \rp \lp x \rp = \left\| x-x_i \right\|_1
\lp \real_{\rect} \lp \mathsf{MC}^{N,d}_{x,y} \rp \rp \lp x \rp - \lp \real_{\rect}\lp \nrm^d_1 \rp \circ \real_{\rect}\lp \aff_{\mathbb{I}_d,-x_i} \rp \rp \lp x \rp = \left\| x-x_i \right\|_1
\end{align}
This and \cite[Proposition~2.20]{grohs2019spacetime} combined establish that for all $x \in \R^d$ it holds that:
\begin{align}
@ -1094,7 +1094,7 @@ We will call the approximant $\max_{i \in \{0,1,\hdots, N\}}\{ f_i\}$, the \text
\end{align}
Then Lemma \ref{comp_prop} and Lemma \ref{9.7.4} tells us that for all $x\in \R^d$ it holds that:
\begin{align}
\lp \real_{\rect} \lp \mathsf{MC} \rp \rp \lp x \rp &= \lp \real_{\rect}\lp \mxm^N \rp \circ \lp \real_{\rect}\lp \mathsf{X} \rp \rp \rp \lp x \rp \nonumber \\
\lp \real_{\rect} \lp \mathsf{MC}^{N,d}_{x,y} \rp \rp \lp x \rp &= \lp \real_{\rect}\lp \mxm^N \rp \circ \lp \real_{\rect}\lp \mathsf{X} \rp \rp \rp \lp x \rp \nonumber \\
&= \lp \real_{\rect}\lp \mxm^N \rp \rp \lp y_1-L \|x-x_1\|_1,y_2-L\|x-x_2\|_1,...,y_N-L\|x-x_N\|_1\rp \nonumber\\
&=\max_{i\in \{1,2,...,N\} } \lp y_i - L \|x-x_i\|_1\rp
\end{align}
@ -1118,7 +1118,7 @@ We will call the approximant $\max_{i \in \{0,1,\hdots, N\}}\{ f_i\}$, the \text
\end{align}
Finally Lemma \ref{comp_prop}, (\ref{8.3.33}), and Lemma \ref{lem:mxm_prop} yields that:
\begin{align}
\param(\mathsf{MC}) &= \param \lp \mxm^N \bullet \aff_{-L\mathbb{I}_N,y} \bullet \lp \boxminus_{i=1}^N \lb \nrm^d_1 \bullet \aff_{\mathbb{I}_d,-x_i} \rb \rp \bullet \cpy_{N,d} \rp \nonumber\\
\param(\mathsf{MC}^{N,d}_{x,y}) &= \param \lp \mxm^N \bullet \aff_{-L\mathbb{I}_N,y} \bullet \lp \boxminus_{i=1}^N \lb \nrm^d_1 \bullet \aff_{\mathbb{I}_d,-x_i} \rb \rp \bullet \cpy_{N,d} \rp \nonumber\\
&\les \param \lp \mxm^N \bullet \lp \boxminus_{i=1}^N \lb \nrm^d_1\bullet \aff_{\mathbb{I}_d, -x} \rb \rp \rp \nonumber\\
&\les \param \lp \mxm^N \rp + \param \lp \lp \boxminus_{i=1}^N \lb \nrm^d_1\bullet \aff_{\mathbb{I}_d, -x} \rb\rp \rp + \nonumber\\ &\wid_1\lp \mxm^N\rp \cdot \wid_{\hid \lp \boxminus_{i=1}^N \lb \nrm^d_1\bullet \aff_{\mathbb{I}_d, -x} \rb\rp} \lp \boxminus_{i=1}^N \lb \nrm^d_1\bullet \aff_{\mathbb{I}_d, -x} \rb\rp \nonumber \\
&\les \left\lceil \lp \frac{2}{3}d^2+3d\rp \lp 1+\frac{1}{2}^{2\lp \left\lceil \log_2\lp d\rp\right\rceil+1 \rp}\rp + 1 \right\rceil + 7N^2d^2 + 3\left\lceil \frac{N}{2}\right\rceil \cdot 2dN
@ -1316,14 +1316,14 @@ We will call the approximant $\max_{i \in \{0,1,\hdots, N\}}\{ f_i\}$, the \text
\end{proof}
\subsection{Explicit ANN Approximations }
\begin{lemma}
\begin{lemma}\label{lem:maxconv_accuracy}
Let $d,N \in \N$, $L \in \lb 0,\infty \rp$. Let $E \subseteq \R^d$. Let $x_1,x_2,...,x_N \in E$, let $f:E \rightarrow \R$ satisfy for all $x_1,y_1 \in E$ that $\left| f(x_1) -f(y_1)\right| \les L \left\| x_1-y_1 \right\|_1$ and let $\mathsf{MC}^{N,d}_{x,y} \in \neu$ and $y = \lp f\lp x_1 \rp, f \lp x_2 \rp,...,f\lp x_N \rp\rp$ satisfy:
\begin{align}
\mathsf{MC} = \mxm^N \bullet \aff_{-L\mathbb{I}_N,y} \bullet \lb \boxminus^N_{i=1} \nrm^d_1 \bullet \aff_{\mathbb{I}_d,-x_i} \rb \bullet \cpy_{N,d}
\mathsf{MC}^{N,d}_{x,y} = \mxm^N \bullet \aff_{-L\mathbb{I}_N,y} \bullet \lb \boxminus^N_{i=1} \nrm^d_1 \bullet \aff_{\mathbb{I}_d,-x_i} \rb \bullet \cpy_{N,d}
\end{align}
It is then the case that:
\begin{align}\label{(9.7.42)}
\sup_{x\in E} \left| \lp \real_{\rect}\lp \mathsf{MC} \rp \rp \lp x \rp -f\lp x \rp \right| \les 2L \lb \sup _{x\in E} \lp \min_{i\in \{1,2,...,N\}} \left\| x-x_i\right\|_1\rp\rb
\sup_{x\in E} \left| \lp \real_{\rect}\lp \mathsf{MC}^{N,d}_{x,y} \rp \rp \lp x \rp -f\lp x \rp \right| \les 2L \lb \sup _{x\in E} \lp \min_{i\in \{1,2,...,N\}} \left\| x-x_i\right\|_1\rp\rb
\end{align}
\end{lemma}
\begin{proof}
@ -1338,22 +1338,24 @@ We will call the approximant $\max_{i \in \{0,1,\hdots, N\}}\{ f_i\}$, the \text
Then Lemma \ref{(9.7.5)} tells us that for all $x\in E$ it holds that $F(x) = \lp \real_{\rect} \lp \mathsf{MC}^{N,d}_{x,y} \rp \rp \lp x \rp$. This combined with (\ref{(9.7.44)}) establishes (\ref{(9.7.42)}).
\end{proof}
\begin{lemma}
Let $d,N \in \N$, $L \in \lb 0,\infty \rp$. Let $E \subseteq \R^d$. Let $x_1,x_2,...,x_N \in E$, let $f:E \rightarrow \R$ satisfy for all $x_1,x_2 \in E$ that $\left| f(x_1) -f(x_2)\right| \les L \left\| x_1-x_2 \right\|_1$ and let $\mathsf{MC} \in \neu$ and $y = \lp f\lp x_1 \rp, f \lp x_2 \rp,...,f\lp x_N \rp\rp$ satisfy:
Let $N \in \N$, $L \in \lb 0,\infty \rp$, and let $a,b \in \R$ with $a < b$. Let $x_1,x_2,...,x_N \in \lb a,b\rb$, let $f:\lb a,b\rb \rightarrow \R$ satisfy for all $u,v \in \lb a,b\rb$ that $\left| f(u) -f(v)\right| \les L \left| u-v \right|$ and let $\mathsf{MC}^{N,1}_{x,y} \in \neu$ and $y = \lp f\lp x_1 \rp, f \lp x_2 \rp,...,f\lp x_N \rp\rp$ satisfy:
\begin{align}
\mathsf{MC} = \mxm^N \bullet \aff_{-L\mathbb{I}_N,y} \bullet \lb \boxminus^N_{i=1} \nrm^d_1 \bullet \aff_{\mathbb{I}_d,-x_i} \rb \bullet \cpy_{N,d}
\end{align}
It is then the case that:
\begin{align}
\lim_{N \rightarrow \infty} \lb \mathbb{P} \lp \sup_{x\in E} \left| \lp \real_{\rect}\lp \mathsf{MC} \rp \rp \lp x \rp -f\lp x \rp \right| >0 \rp\rb \rightarrow 0
\mathsf{MC}^{N,1}_{x,y} = \mxm^N \bullet \aff_{-L\mathbb{I}_N,y} \bullet \lb \boxminus^N_{i=1} \nrm^1_1 \bullet \aff_{1,-x_i} \rb \bullet \cpy_{N,1}
\end{align}
It is then the case for the approximant $\mathsf{MC}^{N,1}_{x,y}$ that:
\begin{enumerate}[label = (\roman*)]
\item $\inn \lp \mathsf{MC}^{N,1}_{x,y} \rp = 1$
\item $\out\lp \mathsf{MC}^{N,1}_{x,y} \rp = 1$
\item $\hid \lp \mathsf{MC}^{N,1}_{x,y} \rp = \left\lceil \log_2 \lp N \rp \right\rceil +1$
\item $\wid_1 \lp \mathsf{MC}^{N,1}_{x,y} \rp = 2N$
\item for all $i \in \{ 2,3,...\}$ we have $\wid_i \lp \mathsf{MC}^{N,1}_{x,y} \rp \les 3 \left\lceil \frac{N}{2^{i-1}} \right\rceil$
\item it holds for all $x \in \R$ that $\lp \real_{\rect} \lp \mathsf{MC}^{N,1}_{x,y} \rp \rp \lp x \rp = \max_{i \in \{1,2,...,N\}} \lp y_i - L \left| x-x_i \right|\rp$
\item it holds that $\param \lp \mathsf{MC}^{N,1}_{x,y} \rp \les 6 + 7N^2 + 3\left\lceil \frac{N}{2}\right\rceil \cdot 2N$
\item it holds that $\sup_{x\in \lb a,b\rb} \left| \lp \real_{\rect} \lp \mathsf{MC}^{N,1}_{x,y} \rp \rp \lp x \rp - f(x) \right| \les 2L \frac{|a-b|}{N}$
\end{enumerate}
\end{lemma}
\begin{proof}
Note that $N$ is chosen uniformly from set $E \subseteq \R^d$. Note next that (\ref{(9.7.44)}) tells us that:
\begin{align}
&\lim_{N \rightarrow \infty} \lb \mathbb{P} \lp \sup_{x\in E} \left| \lp \real_{\rect}\lp \mathsf{MC} \rp \rp \lp x \rp -f\lp x \rp \right| >0 \rp\rb \nonumber \\
&\les
\end{align}
Items (i)\textemdash(vii) are assertions of Lemma \ref{lem:mc_prop} with $d \curvearrowleft 1$. Item (viii) is an assertion of Lemma \ref{lem:maxconv_accuracy} with $d \curvearrowleft 1$.
\end{proof}
@ -1374,4 +1376,3 @@ It is then the case that:

View File

@ -544,7 +544,7 @@ Let $n, N,h\in \N$. Let $\delta,\ve \in \lp 0,\infty \rp $, $q\in \lp 2,\infty \
\end{lemma}
\begin{proof}
Note that (\ref{fc-assertion}) is an assertion of Feynman-Kac. LetNow notice that for $x \in \R^{N+1} \times \R^d$ it is the case that:
Note that (\ref{fc-assertion}) is an assertion of Feynman-Kac. Now notice that for $x \in \R^{N+1} \times \R^d$ it is the case that:
\begin{align}
\real_{\rect} \lp \mathsf{UEX}^{N,h,q,\ve}_{n,\mathsf{G}_d,\omega_i}\rp \lp x\rp &= \real_{\rect} \lp \mathsf{UE}^{N,h,q,\ve}_{n,\mathsf{G}_d} \bullet \lb \tun^{N+1}_1 \boxminus \aff_{\mymathbb{0}_{d,d},\mathcal{X}_{\omega_i}} \rb \rp \lp x\rp\nonumber \\
&=\real_{\rect} \lp \mathsf{UE}^{N,h,q,\ve}_{n,\mathsf{G}_d} \rp \circ \real_{\rect}\lp \lb \tun^{N+1}_1 \boxminus \aff_{\mymathbb{0}_{d,d},\mathcal{X}_{\omega_i}} \rb\rp \lp x \rp \nonumber
@ -730,10 +730,10 @@ Note that for a fixed $T \in \lp 0,\infty \rp$ it is the case that $u_d\lp t,x \
\end{lemma}
\begin{proof}
We will prove this with induction. This is straight-forward for the case where we have just one neural network where for all $x \in \R^{\inn\lp \nu_1\rp}$ it is the case that $\left\|\inst_{\rect}\lp \nu_1\rp \lp x\rp - f\lp x\rp\right\|_1 \les \ve_1 = \sum_{i=1}^1\ve_i$. Suppose now, that, $\left\|\inst_{\rect}\lp \boxminus_i^n\nu_i\rp \lp \fx \rp - \lb \frown_{i=1}^n f_i\rb\lp \fx \rp\right\|_1 \les \sum_{i=1}^n\ve_i$ holds true for all cases upto and including $n$. Consider what happens when we have a triple, a function $f_{n+1}$, a neural network $\nu_{n+1}$, and $\ve_{n+1}\in \lp 0,\infty \rp$ with a maximum error over all $x \in \R^{\inn\lp \nu_1\rp}$ of $|f_{n+1}\lp x\rp - \inst_{\rect}\lp \nu_{n+1}\rp\lp x\rp | \les \ve_{n+1}$. Then Lemma \ref{inst_of_stk}, Corollary \ref{sum_of_frown_frown_of_sum}, and the triangle inequality tells us that:
We will prove this with induction. This is straight-forward for the case where we have just one neural network where for all $x \in \R^{\inn\lp \nu_1\rp}$ it is the case that $\left\|\inst_{\rect}\lp \nu_1\rp \lp x\rp - f\lp x\rp\right\|_1 \les \ve_1 = \sum_{i=1}^1\ve_i$. Suppose now, that, $\left\|\inst_{\rect}\lp \boxminus_i^n\nu_i\rp \lp \fx \rp - \lb \frown_{i=1}^n f_i\rb\lp \fx \rp\right\|_1 \les \sum_{i=1}^n\ve_i$ holds true for all cases upto and including $n$. Consider what happens when we have a triple, a function $f_{n+1}$, a neural network $\nu_{n+1}$, and $\ve_{n+1}\in \lp 0,\infty \rp$ with a maximum error over all $x \in \R^{\inn\lp \nu_1\rp}$ of $| \inst_{\rect}\lp \nu_{n+1}\rp\lp x\rp-f_{n+1}\lp x\rp| \les \ve_{n+1}$. Then Lemma \ref{inst_of_stk}, Corollary \ref{sum_of_frown_frown_of_sum}, and the triangle inequality tells us that:
\begin{align}
&\left\|\inst_{\rect}\lp \boxminus_i^{n+1}\nu_i\rp \lp \fx \rp - \lb \frown_{i=1}^{n+1} f_i\rb\lp \fx \rp\right\|_1 \nonumber \\
&\les \left\|\inst_{\rect}\lp \boxminus_i^n\nu_i\rp \lp \fx \rp - \lb \frown_{i=1}^n f_i\rb\lp \fx \rp\right\|_1 + |f_{n+1}\lp x\rp - \inst_{\rect}\lp \nu_{n+1}\rp\lp x\rp | \nonumber \\
&\les \left\|\inst_{\rect}\lp \boxminus_i^n\nu_i\rp \lp \fx \rp - \lb \frown_{i=1}^n f_i\rb\lp \fx \rp\right\|_1 + | \inst_{\rect}\lp \nu_{n+1}\rp\lp x\rp-f_{n+1}\lp x\rp| \nonumber \\
&\les \sum_{i=1}^{n+1}\ve_i
\end{align}
This proves the inductive case and hence the Lemma.

Binary file not shown.