paper-BagRelationalPDBsAreHard/approx_alg.tex

1048 lines
88 KiB
TeX

%root: main.tex
\section{$1 \pm \epsilon$ Approximation Algorithm}
%\AH{I am attempting to rewrite this section mostly from scratch. This will involve taking 'baby' steps towards the goals we spoke of on Friday 080720 as well as throughout the following week on chat channel.}
%
%\AH{\textbf{BEGIN}: Old stuff.}
%
%
%\begin{proof}
%
%Let us now show a sampling scheme which can run in $O\left(|\poly|\cdot k\right)$ per sample.
%
%First, consider when $\poly$ is already an SOP of pure products. In this case, sampling is trivial, and one would sample from the $\setsize$ terms with probability proportional to the product of probabilitites for each variable in the sampled monomial.
%
%Second, consider when $\poly$ has a POS form with a product width of $k$. In this case, we can view $\poly$ as an expression tree, where the leaves represent the individual values of each factor. The leaves are joined together by either a $\times$ or $+$ internal node, and so on, until we reach the root, which is joining the $k$-$\times$ nodes.
%
%Then for each $\times$ node, we multiply its subtree values, while for each $+$ node, we pick one of its children with probability proportional to the product of probabilities across its variables.
%
%\AH{I think I mean to say a probability proportional to the number of elements in it's given subtree.}
%
%The above sampling scheme is in $O\left(|\poly|\cdot k\right)$ time then, since we have for either case, that at most the scheme would perform within a factor of the $|\poly|$ operations, and those operations are repeated the product width of $k$ times.
%
%Thus, it is the case, that we can approximate $\rpoly(\prob_1,\ldots, \prob_n)$ within the claimed confidence bounds and computation time, thus proving the lemma.\AH{State why.}
%
%\AH{Discuss how we have that $\rpoly \geq O(\setsize)$. Discuss that we need $b-a$ to be small.}
%\end{proof}
%
%\qed
%\AH{{\bf END:} Old Stuff}
%\begin{Definition}[Polynomial]\label{def:polynomial}
%The expression $\poly(\vct{X})$ is a polynomial if it satisfies the standard mathematical definition of polynomial, and additionally is in the standard monomial basis.
%\end{Definition}
%To clarify defintion ~\ref{def:polynomial}, a polynomial in the standard monomial basis is one whose monomials are in SOP form, and whose non-distinct monomials have been collapsed into one distinct monomial, with its corresponding coefficient accurately reflecting the number of monomials combined.
Now, some useful definitions and notation. For illustrative purposes in the definitions below, let us consider when $\poly(\vct{X}) = 2x^2 + 3xy - 2y^2$.
\begin{Definition}[Degree]\label{def:degree}
The degree of polynomial $\poly(\vct{X})$ is the maximum sum of the exponents of a monomial, over all monomials.
\end{Definition}
The degree of $\poly(\vct{X})$ in the above example is $2$. In this paper we consider only finite degree polynomials.
\AH{We need to verify that this definition is consistent with the rest of the paper. Also, it might be useful to specify coefficients are 1?}
\begin{Definition}[Monomial]\label{def:monomial}
A monomial is a product of a fixed set of variables, each raised to a non-negative integer power.
\end{Definition}
For example, the expression $xy$ is a monomial from the term $3xy$ of $\poly(\vct{X})$, produced from the set of variables $\vct{X} = \{x, y\}$.
%\begin{Definition}[$|\vct{X}|$]\label{def:num-vars}
%Denote the number of variables in $\poly(\vct{X})$ as $|\vct{X}|$.
%\end{Definition}
%
%In the running example, $|\vct{X}| = 2$.
\begin{Definition}[Expression Tree]\label{def:express-tree}
An expression tree $\etree$ is a binary %an ADT logically viewed as an n-ary
tree, whose internal nodes are from the set $\{+, \times\}$, with leaf nodes being either from the set $\mathbb{R}$ $(\tnum)$ or from the set of monomials $(\var)$. The members of $\etree$ are \type, \val, \vari{partial}, \vari{children}, and \vari{weight}, where \type is the type of value stored in the node $\etree$ (i.e. one of $\{+, \times, \var, \tnum\}$, \val is the value stored, and \vari{children} is the list of $\etree$'s children where $\etree_\lchild$ is the left child and $\etree_\rchild$ the right child. Remaining fields hold values whose semantics we will fix later. When $\etree$ is used as input of ~\cref{alg:mon-sam} and ~\cref{alg:one-pass}, the values of \vari{partial} and \vari{weight} will not be set. %SEMANTICS FOR \etree: \vari{partial} is the sum of $\etree$'s coefficients , n, and \vari{weight} is the probability of $\etree$ being sampled.
\end{Definition}
Note that $\etree$ need not encode an expression in the standard monomial basis, for example, when $\etree$ represents the expression $(x + 2y)(2x - y)$.
\begin{Definition}[poly$(\cdot)$]\label{def:poly-func}
Denote $poly(\etree)$ to be the function that takes as input expression tree $\etree$ and outputs its corresponding polynomial. Recursively defined on $\etree$ as follows, where $\etree_\lchild$ and $\etree_\rchild$ denote the left and right child of $\etree$ respectively.
% \begin{align*}
% &\etree.\type = +\mapsto&& \polyf(\etree_\lchild) + \polyf(\etree_\rchild)\\
% &\etree.\type = \times\mapsto&& \polyf(\etree_\lchild) \cdot \polyf(\etree_\rchild)\\
% &\etree.\type = \var \text{ OR } \tnum\mapsto&& \etree.\val
% \end{align*}
\begin{equation*}
\polyf(\etree) = \begin{cases}
\polyf(\etree_\lchild) + \polyf(\etree_\rchild) &\text{ if \etree.\type } = +\\
\polyf(\etree_\lchild) \cdot \polyf(\etree_\rchild) &\text{ if \etree.\type } = \times\\
\etree.\val &\text{ if \etree.\type } = \var \text{ OR } \tnum.
\end{cases}
\end{equation*}
\end{Definition}
\AH{
\par2) Below seems like over-defining to me. Is this really necessary? The first sentence I think is \textit{enough}.}
Note that addition and multiplication above follow the standard interpretation over polynomials.
%Specifically, when adding two monomials whose variables and respective exponents agree, the coefficients corresponding to the monomials are added and their sum is multiplied to the monomial. Multiplication here is denoted by concatenation of the monomial and coefficient. When two monomials are multiplied, the product of each corresponding coefficient is computed, and the variables in each monomial are multiplied, i.e., the exponents of like variables are added. Again we notate this by the direct product of coefficient product and all disitinct variables in the two monomials, with newly computed exponents.
\begin{Definition}[Expression Tree Set]\label{def:express-tree-set}$\etreeset{\smb}$ is the set of all possible expression trees $\etree$, such that $poly(\etree) = \poly(\vct{X})$.
\end{Definition}
For our running example, $\etreeset{\smb} = \{2x^2 + 3xy - 2y^2, (x + 2y)(2x - y), x(2x - y) + 2y(2x - y), 2x(x + 2y) - y(x + 2y)\}$. Note that \cref{def:express-tree-set} implies that $\etree \in \etreeset{poly(\etree)}$.
\begin{Definition}[Expanded T]\label{def:expand-tree}
$\expandtree{\etree}$ is the pure sum of products expansion of $\etree$. The logical view of \expandtree{\etree} ~is a list of tuples $(\monom, \coef)$, where $\monom$ is of type monomial and $\coef$ is in $\mathbb{R}$. \expandtree{\etree} has the following recursive definition.
\end{Definition}
% recursively defined as
% \begin{align*}
% &\etree.\type = + \mapsto&& \elist{\expandtree{\etree_\lchild}, \expandtree{\etree_\rchild}}\\
% &\etree.\type = \times \mapsto&& \elist{\expandtree{\etree_\lchild} \otimes \expandtree{\etree_\rchild}}\\
% &\etree.\type = \tnum \mapsto&& \elist{(\emptyset, \etree.\val)}\\
% &\etree.\type = \var \mapsto&& \elist{(\etree.\val, 1)}
% \end{align*}
\begin{align*}
\expandtree{\etree} = \begin{cases}
\expandtree{\etree_\lchild} \circ \expandtree{\etree_\rchild} &\textbf{ if }\etree.\type = +\\
\left\{(\monom_\lchild \cup \monom_\rchild, \coef_\lchild \cdot \coef_\rchild) ~|~ (\monom_\lchild, \coef_\lchild) \in \expandtree{\etree_\lchild}, (\monom_\rchild, \coef_\rchild) \in \expandtree{\etree_\rchild}\right\} &\textbf{ if }\etree.\type = \times\\
\elist{(\emptyset, \etree.\val)} &\textbf{ if }\etree.\type = \tnum\\
\elist{(\{\etree.\val\}, 1)} &\textbf{ if }\etree.\type = \var.\\
\end{cases}
\end{align*}
%where that the multiplication of two tuples %is the standard multiplication over monomials and the standard multiplication over coefficients to produce the product tuple, as in
%is their direct product $(\monom_1, \coef_1) \cdot (\monom_2, \coef_2) = (\monom_1 \cdot \monom_2, \coef_1 \times \coef_2)$ such that monomials $\monom_1$ and $\monom_2$ are concatenated in a product operation, while the standard product operation over reals applies to $\coef_1 \times \coef_2$. The product of $\expandtree{\etree_\lchild} \cdot \expandtree{\etree'_\rchild}$ is then the cross product of the multiplication of all such tuples returned to both $\expandtree{\etree_\lchild}$ and $\expandtree{\etree_\rchild}$. %The operator $\otimes$ is defined as the cross-product tuple multiplication of all such tuples returned by both $\expandtree{\etree_\lchild}$ and $\expandtree{\etree_\rchild}$.
\begin{Example}\label{example:expr-tree-T}
To illustrate with an example, consider the product $(x + 2y)(2x - y)$ and its expression tree $\etree$ in Figure ~\ref{fig:expr-tree-T}. The pure expansion of the product is $2x^2 - xy + 4xy - 2y^2 = \expandtree{\etree}$, logically viewed as $[(x^2, 2), (xy, -1), (xy, 4), (y^2, -2)]$.
\end{Example}
\begin{figure}[h!]
\begin{tikzpicture}[thick, level distance=0.9cm,level 1/.style={sibling distance=3.55cm}, level 2/.style={sibling distance=1.8cm}, level 3/.style={sibling distance=0.8cm}]% level/.style={sibling distance=6cm/(#1 * 1.5)}]
\node[tree_node](root){$\boldsymbol{\times}$}
child{node[tree_node]{$\boldsymbol{+}$}
child{node[tree_node]{x}
%child[missing]{node[tree_node]{}}
%child{node[tree_node]{x}}
}
child{node[tree_node]{$\boldsymbol{\times}$}
child{node[tree_node]{2}}
child{node[tree_node]{y}}
}
}
child{node[highlight_treenode] (TR) {$\boldsymbol{+}$}
child{node[tree_node]{$\boldsymbol{\times}$}
child{node[tree_node]{2}}
child{node[tree_node]{x}}
}
child{node[tree_node]{$\boldsymbol{\times}$}
child{node[tree_node] (neg-leaf) {-1}}
child{node[tree_node]{y}}
}
%child[sibling distance= 0cm, grow=north east, red]{node[tree_node]{$\etree_\rchild$}}
};
% \node[below=2pt of neg-leaf, inner sep=1pt, blue] (neg-comment) {\textbf{Negation pushed to leaf nodes}};
% \draw[<-|, blue] (neg-leaf) -- (neg-comment);
\node[above right=0.7cm of TR, highlight_color, inner sep=0pt, font=\bfseries] (tr-label) {$\etree_\rchild$};
\node[above right=0.7cm of root, highlight_color, inner sep=0pt, font=\bfseries] (t-label) {$\etree$};
\draw[<-|, highlight_color] (TR) -- (tr-label);
\draw[<-|, highlight_color] (root) -- (t-label);
\end{tikzpicture}
\caption{Expression tree $\etree$ for the product $\boldsymbol{(x + 2y)(2x - y)}$.}
\label{fig:expr-tree-T}
\end{figure}
\begin{Definition}[Positive T]\label{def:positive-tree}
Let the positive tree, denoted $\abs{\etree}$ be the resulting expression tree such that, for each leaf node $\etree'$ of $\etree$ where $\etree'.\type$ is $\tnum$, $\etree'.\vari{value} = |\etree'.\vari{value}|$. %value $\coef$ of each coefficient leaf node in $\etree$ is set to %$\coef_i$ in $\etree$ is exchanged with its absolute value$|\coef|$.
\end{Definition}
Using the same polynomial from the above example, $poly(\abs{\etree}) = (x + 2y)(2x + y) = 2x^2 +xy +4xy + 2y^2 = 2x^2 + 5xy + 2y^2$. Note that this \textit{is not} the same as $\poly(\vct{X})$.
\begin{Definition}[Evaluation]\label{def:exp-poly-eval}
Given an expression tree $\etree$ and $\vct{v} \in \mathbb{R}^\numvar$, $\etree(\vct{v}) = poly(\etree)(\vct{v})$.
\end{Definition}
In the subsequent subsections we lay the groundwork to prove the following theorem.
\begin{Theorem}\label{lem:approx-alg}
For any query polynomial $\poly(\vct{X})$, an approximation of $\rpoly(\prob_1,\ldots, \prob_\numvar)$ can be computed in $O\left(\treesize(\etree) + \frac{\log{\frac{1}{\conf}}\cdot \abs{\etree}^2(1,\ldots, 1)}{\error^2\cdot\rpoly^2(\prob_1,\ldots, \prob_\numvar)}\right)$, with multiplicative $(\error,\delta)$-bounds, where $k$ denotes the degree of $\poly$.
\end{Theorem}
\subsection{Approximating $\rpoly$}
\subsubsection{Description}
Algorithm ~\ref{alg:mon-sam} approximates $\rpoly$ using the following steps. First, a call to $\onepass$ on its input $\etree$ produces a non-biased weight distribution over the monomials of $\expandtree{\etree}$ and a correct count of $|\etree|(1,\ldots, 1)$, i.e., the number of monomials in $\expandtree{\etree}$. Next, ~\cref{alg:mon-sam} calls $\sampmon$ to sample one monomial and its sign from $\expandtree{\etree}$. The sampling is repeated $\ceil{\frac{2\log{\frac{2}{\delta}}}{\epsilon^2}}$ times, where each of the samples are evaluated over $\vct{p}$, multiplied by $1 \times sign$, and summed. The final result is scaled accordingly returning an estimate of $\rpoly$ with the claimed $(\error, \conf)$-bound of ~\cref{lem:mon-samp}.
Recall that the notation $[x, y]$ denotes the range of values between $x$ and $y$ inclusive. The notation $\{x, y\}$ denotes the set of values consisting of $x$ and $y$.
\subsubsection{Pseudo Code}
\begin{algorithm}[H]
\caption{$\approxq$($\etree$, $\vct{p}$, $\conf$, $\error$)}
\label{alg:mon-sam}
\begin{algorithmic}[1]
\Require \etree: Binary Expression Tree
\Require $\vct{p} = (\prob_1,\ldots, \prob_\numvar)$ $\in [0, 1]^N$
\Require $\conf$ $\in [0, 1]$
\Require $\error$ $\in [0, 1]$
\Ensure \vari{acc} $\in \mathbb{R}$
\State $\accum \gets 0$\label{alg:mon-sam-global1}
\State $\numsamp \gets \ceil{\frac{2 \log{\frac{2}{\conf}}}{\error^2}}$\label{alg:mon-sam-global2}
\State $(\vari{\etree}_\vari{mod}, \vari{size}) \gets $ \onepass($\etree$)\label{alg:mon-sam-onepass}\Comment{$\onepass$ is ~\cref{alg:one-pass} \;and \sampmon \; is ~\cref{alg:sample}}
\For{\vari{i} \text{ in } $1\text{ to }\numsamp$}\Comment{Perform the required number of samples}
\State $(\vari{M}_\vari{i}, \vari{sgn}_\vari{i}) \gets $ \sampmon($\etree_\vari{mod}$)\label{alg:mon-sam-sample}
\State $\vari{Y}_\vari{i} \gets 1$\label{alg:mon-sam-assign1}
\For{$\vari{x}_{\vari{j}}$ \text{ in } $\vari{M}_{\vari{i}}$}
\State $\vari{Y}_\vari{i} \gets \vari{Y}_\vari{i} \times \; \vari{\prob}_\vari{j}$\label{alg:mon-sam-product2} \Comment{$\vari{p}_\vari{j}$ is the assignment to $\vari{x}_\vari{j}$ from input $\vct{p}$}
\EndFor
\State $\vari{Y}_\vari{i} \gets \vari{Y}_\vari{i} \times\; \vari{sgn}_\vari{i}$\label{alg:mon-sam-product}
\State $\accum \gets \accum + \vari{Y}_\vari{i}$\Comment{Store the sum over all samples}\label{alg:mon-sam-add}
\EndFor
\State $\vari{acc} \gets \vari{acc} \times \frac{\vari{size}}{\numsamp}$\label{alg:mon-sam-global3}
\State \Return \vari{acc}
\end{algorithmic}
\end{algorithm}
\subsubsection{Correctness}
We state the lemmas for $\onepass$ and $\sampmon$, the auxiliary algorithms on which ~\cref{alg:mon-sam} relies. Their proofs are subsequent.
\begin{Lemma}\label{lem:one-pass}
The $\onepass$ function completes in $O(size(\etree))$ time. After $\onepass$ returns the following post conditions hold. First, that $\abs{\vari{S}}(1,\ldots, 1)$ is correctly computed for each subtree $\vari{S}$ of $\etree$. Second, when $\vari{S}.\val = +$, the weighted distribution $\frac{\abs{\vari{S}_{\vari{child}}}(1,\ldots, 1)}{\abs{\vari{S}}(1,\ldots, 1)}$ is correctly computed for each child of $\vari{S}.$
\end{Lemma}
At the conclusion of $\onepass$, $\etree.\vari{partial}$ will hold the sum of all coefficients in $\expandtree{\abs{\etree}}$, i.e., $\sum\limits_{(\monom, \coef) \in \expandtree{\abs{\etree}}}\coef$. $\etree.\vari{weight}$ will hold the weighted probability that $\etree$ is sampled from its parent $+$ node.
\begin{Lemma}\label{lem:sample}
The function $\sampmon$ completes in $O(\log{k} \cdot k \cdot depth(\etree))$ time, where $k = \degree(poly(\abs{\etree}))$. Upon completion, with probability $\frac{|\coef|}{\abs{\etree}(1,\ldots, 1)}$, $\sampmon$ returns the sampled term $\left(\monom, sign(\coef)\right)$ from $\expandtree{\abs{\etree}}$.
\end{Lemma}
\begin{Theorem}\label{lem:mon-samp}
If the contracts for $\onepass$ and $\sampmon$ hold, then for any $\etree$ with $\degree(poly(|\etree|)) = k$, algorithm \ref{alg:mon-sam} outputs an estimate $\mathcal{X}$ of $\rpoly(\prob_1,\ldots, \prob_\numvar)$ %within an additive $\error \cdot \abs{\etree}(1,\ldots, 1)$ error with
with bound $P\left(\left|\mathcal{X} - \expct\pbox{\mathcal{X}}\right|\geq \error \cdot \abs{\etree}(1,\ldots, 1)\right) \leq \conf$, in $O\left(\treesize(\etree) + \left(\frac{\log{\frac{1}{\conf}}}{\error^2} \cdot k \cdot\log{k} \cdot depth(\etree)\right)\right)$ time.
\end{Theorem}
\begin{proof}[Proof of Theorem \ref{lem:mon-samp}]
Consider $\expandtree{\etree}$ and let $(\monom, \coef)$ be an arbitrary tuple in $\expandtree{\etree}$. For convenience, over an alphabet $\Sigma$ of size $\numvar$, define $\evalmp: \left(\left\{\monom^a~|~\monom \in \Sigma^b, a \in \mathbb{N}, b \in [k]\right\}, [0, 1]^\numvar\right)\mapsto \mathbb{R}$, a function that takes a monomial $\monom$ in $\left\{\monom^a ~|~ \monom \in \Sigma^b, a \in \mathbb{N}, b \in [k]\right\}$ and probability vector $\vct{p}$ (introduced in ~\cref{subsec:def-data}) as input and outputs the evaluation of $\monom$ over $\vct{p}$. By ~\cref{lem:sample}, the sampling scheme samples $(\monom, \coef)$ in $\expandtree{\etree}$ with probability $\frac{|\coef|}{\abs{\etree}(1,\ldots, 1)}$. Note that $\coef \cdot \evalmp(\monom, \vct{p})$ is the value of $(\monom, \coef)$ in $\expandtree{\etree}$ when all variables in $\monom$ are assigned their corresponding probabilities. Note again that the sum of this computation over $\expandtree{\etree}$ is equivalently $\rpoly(\prob_1,\ldots, \prob_\numvar)$.
Consider now a set of $\samplesize$ random variables $\vct{\randvar}$, where each $\randvar_i$ is distributed as described above. Then for random variable $\randvar_i$, it is the case that
$\expct\pbox{\randvar_i} = \sum\limits_{(\monom, \coef) \in \expandtree{\etree}}\frac{\coef \cdot \evalmp(\monom, p)}{\sum\limits_{(\monom, \coef) \in \expandtree{\etree}}|\coef|} = \frac{\rpoly(\prob_1,\ldots, \prob_\numvar)}{\abs{\etree}(1,\ldots, 1)}$. Let $\empmean = \frac{1}{\samplesize}\sum_{i = 1}^{\samplesize}\randvar_i$. It is also true that
\[\expct\pbox{\empmean} = \expct\pbox{ \frac{1}{\samplesize}\sum_{i = 1}^{\samplesize}\randvar_i} = \frac{1}{\samplesize}\sum_{i = 1}^{\samplesize}\expct\pbox{\randvar_i} = \frac{1}{\samplesize}\sum_{i = 1}^{\samplesize}\sum\limits_{(\monom, \coef) \in \expandtree{\etree}}\frac{\coef \cdot \evalmp(\monom, \vct{p})}{\sum\limits_{(\monom, \coef) \in \expandtree{\etree}}|\coef|} = \frac{\rpoly(\prob_1,\ldots, \prob_\numvar)}{\abs{\etree}(1,\ldots, 1)}.\]
Hoeffding's inequality can be used to compute an upper bound on the number of samples $\samplesize$ needed to establish the $(\error, \conf)$-bound. The inequality states that if we know that each $\randvar_i$ is strictly bounded by the intervals $[a_i, b_i]$, then it is true that
\begin{equation*}
P\left(\left|\empmean - \expct\pbox{\empmean}\right| \geq \error\right) \leq 2\exp{\left(-\frac{2\samplesize^2\error^2}{\sum_{i = 1}^{\samplesize}(b_i -a_i)^2}\right)}.
\end{equation*}
As implied above, Hoeffding's inequality assumes that the sum of random variables is divided by the number of variables. Since $\rpoly(\prob_1,\ldots, \prob_\numvar) = \expct\pbox{\empmean} \cdot \abs{\etree}(1,\ldots, 1)$, then our estimate is the sum of random samples multiplied by $\frac{\abs{\etree}(1,\ldots, 1)}{\samplesize}$. This computation is performed on ~\cref{alg:mon-sam-global3}.
%Also see that to properly estimate $\rpoly$, it is necessary to multiply by the number of monomials in $\rpoly$, i.e. $\abs{\etree}(1,\ldots, 1)$. Therefore it is the case that $\frac{acc}{N}$ gives the estimate of one monomial, and multiplying by $\abs{\etree}(1,\ldots, 1)$ yields the estimate of $\rpoly(\prob_1,\ldots, \prob_\numvar)$. This scaling is performed in line ~\ref{alg:mon-sam-global3}.
Line ~\ref{alg:mon-sam-sample} shows that $\vari{sgn}_\vari{i}$ has a value in $\{-1, 1\}$ that is multiplied with at most $\degree(\polyf(\abs{\etree}))$ factors from $\vct{p}$ (\cref{alg:mon-sam-product2}) such that each $p_i$ is in $[0, 1]$; the range for each $\randvar_i$ ($\vari{Y}_\vari{i}$ in the pseudo code) is then strictly bounded by $[-1, 1]$. Bounding Hoeffding's results by $\conf$ ensures confidence no less than $1 - \conf$. Then by upper bounding Hoeffding with $\conf$, it is the case that
\begin{equation*}
P\pbox{~\left| \empmean - \expct\pbox{\empmean} ~\right| \geq \error} \leq 2\exp{\left(-\frac{2\samplesize^2\error^2}{2^2 \samplesize}\right)} \leq \conf.
\end{equation*}
Solving for the number of samples $\samplesize$ we get
\begin{align}
&\conf \geq 2\exp{-\left(\frac{2\samplesize^2\error^2}{4\samplesize}\right)}\label{eq:hoeff-1}\\
%&\frac{\conf}{2} \geq \exp{-\left(\frac{2\samplesize^2\error^2}{4\samplesize}\right)}\label{eq:hoeff-2}\\
%&\frac{2}{\conf} \leq \exp{\left(\frac{2\samplesize^2\error^2}{4\samplesize}\right)}\label{eq:hoeff-3}\\
%&\log{\frac{2}{\conf}} \leq \left(\frac{2\samplesize^2\error^2}{4\samplesize}\right)\label{eq:hoeff-4}\\
%&\log{\frac{2}{\conf}} \leq \frac{\samplesize\error^2}{2}\label{eq:hoeff-5}\\
&\frac{2\log{\frac{2}{\conf}}}{\error^2} \leq \samplesize.\label{eq:hoeff-6}
\end{align}
By Hoeffding we obtain the number of samples necessary to achieve the claimed additive error bounds.
This concludes the proof for the first claim of theorem ~\ref{lem:mon-samp}.
\paragraph{Run-time Analysis}
Note that lines ~\ref{alg:mon-sam-global1}, ~\ref{alg:mon-sam-global2}, and ~\ref{alg:mon-sam-global3} are $O(1)$ global operations. The call to $\onepass$ in line ~\ref{alg:mon-sam-onepass} by lemma ~\ref{lem:one-pass} is $O(\treesize(\etree))$ time.
%First, algorithm ~\ref{alg:mon-sam} calls \textsc{OnePass} which takes $O(|\etree|)$ time.
Then for $\numsamp = \ceil{\frac{2 \log{\frac{2}{\conf}}}{\error^2}}$, the $O(1)$ assignment, product, and addition operations occur. Over the same $\numsamp$ iterations, $\sampmon$ is called, with a runtime of $O(\log{k}\cdot k \cdot depth(\etree))$ by lemma ~\ref{lem:sample}. Finally, over the same iterations, because $\degree(\polyf(\abs{\etree})) = k$, the assignment and product operations of line ~\ref{alg:mon-sam-product2} are called at most $k$ times.
Thus we have $O(\treesize(\etree)) + O\left(\frac{\log{\frac{1}{\conf}}}{\error^2} \cdot \left(k + \log{k}\cdot k \cdot depth(\etree)\right)\right) = O\left(\treesize(\etree) + \left(\frac{\log{\frac{1}{\conf}}}{\error^2} \cdot \left(k \cdot\log{k} \cdot depth(\etree)\right)\right)\right)$ overall running time.
\end{proof}
\qed
\AH{Why did we drop the $k \cdot \log{k} \cdot depth(\etree)$ factor in what follows below?}
\begin{proof}[Proof of Theorem \ref{lem:approx-alg}]
%\begin{Corollary}\label{cor:adj-err}
Setting $\error = \error \cdot \frac{\rpoly(\prob_1,\ldots, \prob_\numvar)}{\abs{\etree}(1,\ldots, 1)}$ achieves $1 \pm \epsilon$ multiplicative error bounds, in $O\left(\treesize(\etree) + \frac{\log{\frac{1}{\conf}}\cdot \abs{\etree}^2(1,\ldots, 1)}{\error^2\cdot\rpoly^2(\prob_1,\ldots, \prob_\numvar)}\right)$.
%\end{Corollary}
Since it is the case that we have $\error \cdot \abs{\etree}(1,\ldots, 1)$ additive error, one can set $\error = \error \cdot \frac{\rpoly(\prob_1,\ldots, \prob_\numvar)}{\abs{\etree}(1,\ldots, 1)}$, yielding a multiplicative error proportional to $\rpoly(\prob_1,\ldots, \prob_\numvar)$. This only affects the runtime in the number of samples taken, changing the first factor of the second summand of the original runtime accordingly.
The derivation over the number of samples is then
\begin{align*}
&\frac{2\log{\frac{2}{\conf}}}{\error^2 \left(\frac{\rpoly(\prob_1,\ldots, \prob_N)}{\abs{\etree}(1,\ldots, 1)}\right)^2}\\
= &\frac{2\log{\frac{2}{\conf}}\cdot \abs{\etree}^2(1,\ldots, 1)}{\error^2 \cdot \rpoly^2(\prob_1,\ldots, \prob_\numvar)},
\end{align*}
and the runtime then follows, thus upholding ~\cref{lem:approx-alg}.
\end{proof}
\qed
\subsection{OnePass Algorithm}
\subsubsection{Description}
Algorithm ~\ref{alg:one-pass} satisfies the requirements of lemma ~\ref{lem:one-pass}.
The evaluation of $\abs{\etree}(1,\ldots, 1)$ can be defined recursively, as follows:
\begin{align*}
\abs{\etree}(1,\ldots, 1) = \begin{cases}
\abs{\etree_\lchild}(1,\ldots, 1) \cdot \abs{\etree_\rchild}(1,\ldots, 1) &\textbf{if }\etree.\type = \times\\
\abs{\etree_\lchild}(1,\ldots, 1) + \abs{\etree_\rchild}(1,\ldots, 1) &\textbf{if }\etree.\type = + \\
|\etree.\val| &\textbf{if }\etree.\type = \tnum\\
1 &\textbf{if }\etree.\type = \var.
\end{cases}
\end{align*}
%\begin{align*}
%&\eval{\etree ~|~ \etree.\type = +}_{\abs{\etree}} =&& \eval{\etree_\lchild}_{\abs{\etree}} + \eval{\etree_\rchild}_{\abs{\etree}}\\
%&\eval{\etree ~|~ \etree.\type = \times}_{\abs{\etree}} = && \eval{\etree_\lchild}_{\abs{\etree}} \cdot \eval{\etree_\rchild}_{\abs{\etree}}\\
%&\eval{\etree ~|~ \etree.\type = \tnum}_{\abs{\etree}} = && \etree.\val\\
%&\eval{\etree ~|~ \etree.\val = \var}_{\abs{\etree}} = && 1
%\end{align*}
In the same fashion the weighted distribution can be described as above with the following modification for the case when $\etree.\type = +$:
\begin{align*}
&\abs{\etree_\lchild}(1,\ldots, 1) + \abs{\etree_\rchild}(1,\ldots, 1); \etree_\lchild.\vari{weight} \gets \frac{\abs{\etree_\lchild}(1,\ldots, 1)}{\abs{\etree_\lchild}(1,\ldots, 1) + \abs{\etree_\rchild}(1,\ldots, 1)}, \etree_\rchild.\vari{weight} \gets \frac{\abs{\etree_\rchild}(1,\ldots, 1)}{\abs{\etree_\lchild}(1,\ldots, 1)+ \abs{\etree_\rchild}(1,\ldots, 1)} &\textbf{if }\etree.\type = +
\end{align*}
%\begin{align*}
%&\eval{\etree~|~\etree.\type = +}_{\wght} =&&\eval{\etree_\lchild}_{\abs{\etree}} + \eval{\etree_\rchild}_{\abs{\etree}}; \etree_\lchild.\wght = \frac{\eval{\etree_\lchild}_{\abs{\etree}}}{\eval{\etree_\lchild}_{\abs{\etree}} + \eval{\etree_\rchild}_{\abs{\etree}}}; \etree_\rchild.\wght = \frac{\eval{\etree_\rchild}_{\abs{\etree}}}{\eval{\etree_\lchild}_{\abs{\etree}} + \eval{\etree_\rchild}_{\abs{\etree}}}
%\end{align*}
Algorithm ~\ref{alg:one-pass} essentially implements the above definitions.
\subsubsection{Pseudo Code}
See algorithm ~\ref{alg:one-pass} for details.
\begin{algorithm}[h!]
\caption{\onepass$(\etree)$}
\label{alg:one-pass}
\begin{algorithmic}[1]
\Require \etree: Binary Expression Tree
\Ensure \etree: Binary Expression Tree
\Ensure \vari{sum} $\in \mathbb{R}$
\If{$\etree.\type = +$}\label{alg:one-pass-equality1}
\State $\accum \gets 0$\label{alg:one-pass-plus-assign1}
\For{$child$ in $\etree.\vari{children}$}\Comment{Sum up all children coefficients}
\State $(child, \vari{s}) \gets \onepass(child)$
\State $\accum \gets \accum + \vari{s}$\label{alg:one-pass-plus-add}
\EndFor
\State $\etree.\vari{partial} \gets \accum$\label{alg:one-pass-plus-assign2}
\For{$child$ in $\etree.\vari{children}$}\Comment{Record distributions for each child}
\State $child.\vari{weight} \gets \frac{\vari{child.partial}}{\etree.\vari{partial}}$\label{alg:one-pass-plus-prob}
\EndFor
\State $\vari{sum} \gets \etree.\vari{partial}$\label{alg:one-pass-plus-assign3}
\State \Return (\etree, \vari{sum})
\ElsIf{$\etree.\type = \times$}\label{alg:one-pass-equality2}
\State $\accum \gets 1$\label{alg:one-pass-times-assign1}
\For{$child \text{ in } \etree.\vari{children}$}\Comment{Compute the product of all children coefficients}
\State $(child, \vari{s}) \gets \onepass(child)$
\State $\accum \gets \accum \times \vari{s}$\label{alg:one-pass-times-product}
\EndFor
\State $\etree.\vari{partial}\gets \accum$\label{alg:one-pass-times-assign2}
\State $\vari{sum} \gets \etree.\vari{partial}$\label{alg:one-pass-times-assign3}
\State \Return (\etree, \vari{sum})
\ElsIf{$\etree.\type = numeric$}\Comment{Base case}\label{alg:one-pass-equality3}
\State $\vari{sum} \gets |\etree.\val|$\label{alg:one-pass-leaf-assign1}\Comment{This step effectively converts $\etree$ into $\abs{\etree}$}
\State \Return (\etree, \vari{sum})
\Else\Comment{$\etree.\type = \var$}\label{alg:one-pass-equality4}
\State $\vari{sum} \gets 1$\label{alg:one-pass-global-assign}
\State \Return (\etree, \vari{sum})
\EndIf
\end{algorithmic}
\end{algorithm}
\begin{Example}\label{example:one-pass}
Consider when $\etree$ is $+\left(\times\left(+\left(\times\left(1, x_1\right), \times\left(1, x_2\right)\right), +\left(\times\left(1, x_1\right), \times\left(-1, x_2\right)\right)\right), \times\left(\times\left(1, x_2\right), \times\left(1, x_2\right)\right)\right)$, as seen in ~\cref{fig:expr-tree-T-wght}, which encodes the expression $(x_1 + x_2)(x_1 - x_2) + x_2^2$. After one pass, \cref{alg:one-pass} would have computed the following weight distribution. For the two children of the root $+$ node $\etree$, $\etree_\lchild.\wght = \frac{4}{5}$ and $\etree_\rchild.\wght = \frac{1}{5}$. Similarly, letting $\stree$ denote $\etree_\lchild$, $\stree_\lchild.\wght = \stree_\rchild.\wght = \frac{1}{2}$. Note that in this example, the sampling probabilities for the children of each inner $+$ node of $\stree$ are equal to one another because both parents have the same number of children, and, in each case, the children of each parent $+$ node share the same $|\coef_i|$.
\end{Example}
\begin{figure}[h!]
\begin{tikzpicture}[thick, every tree node/.style={default_node, thick, draw=black, black, circle, text width=0.3cm, font=\bfseries, minimum size=0.65cm}, every child/.style={black}, edge from parent/.style={draw, thick},
level 1/.style={sibling distance=2.5cm},
level 2/.style={sibling distance=1.25cm},
%level 2+/.style={sibling distance=0.625cm}
%level distance = 1.25cm,
%sibling distance = 1cm,
%every node/.append style = {anchor=center}
]
\Tree [.\node(root){$\boldsymbol{+}$};
\edge [wght_color] node[midway, auto= right, font=\bfseries, gray] {$\bsym{\frac{4}{5}}$}; [.\node[highlight_color](tl){$\boldsymbol{\times}$};
[.\node(s){$\bsym{+}$};
\edge[wght_color] node[pos=0.35, left, font=\bfseries, gray]{$\bsym{\frac{1}{2}}$}; [.\node[highlight_color](sl){$\bsym{x_1}$}; ]
\edge[wght_color] node[pos=0.35, right, font=\bfseries, gray]{$\bsym{\frac{1}{2}}$}; [.\node[highlight_color](sr){$\bsym{x_2}$}; ]
]
[.\node(sp){$\bsym{+}$};
\edge[wght_color] node[pos=0.35, left, font=\bfseries, gray]{$\bsym{\frac{1}{2}}$}; [.\node[highlight_color](spl){$\bsym{x_1}$}; ]
\edge[wght_color] node[pos=0.35, right, font=\bfseries, gray]{$\bsym{\frac{1}{2}}$}; [.\node[highlight_color](spr){$\bsym{\times}$};
[.$\bsym{-1}$ ] [.$\bsym{x_2}$ ]
]
]
]
\edge [wght_color] node[midway, auto=left, font=\bfseries, gray] {$\bsym{\frac{1}{5}}$}; [.\node[highlight_color](tr){$\boldsymbol{\times}$};
[.$\bsym{x_2}$
\edge [draw=none]; [.\node[draw=none]{}; ]
\edge [draw=none]; [.\node[draw=none]{}; ]
]
[.$\bsym{x_2}$ ] ]
]
% labels for plus node children, with arrows
\node[left=2pt of sl, highlight_color, inner sep=0pt] (sl-label) {$\stree_\lchild$};
\draw[highlight_color] (sl) -- (sl-label);
\node[right=2pt of sr, highlight_color, inner sep=0pt] (sr-label) {$\stree_\rchild$};
\draw[highlight_color] (sr) -- (sr-label);
\node[left=2pt of spl, inner sep=0pt, highlight_color](spl-label) {$\stree_\lchild'$};
\draw[highlight_color] (spl) -- (spl-label);
\node[right=2pt of spr, highlight_color, inner sep=0] (spr-label) {$\stree_\rchild'$};
\draw[highlight_color] (spr) -- (spr-label);
\node[above left=2pt of tl, inner sep=0pt, highlight_color] (tl-label) {$\etree_\lchild$};
\draw[highlight_color] (tl) -- (tl-label);
\node[above right=2pt of tr, highlight_color, inner sep=0pt] (tr-label) {$\etree_\rchild$};
\node[above = 2pt of root, highlight_color, inner sep=0pt, font=\bfseries] (root-label) {$\etree$};
\node[above = 2pt of s, highlight_color, inner sep=0pt, font=\bfseries] (s-label) {$\stree$};
\node[above = 2pt of sp, highlight_color, inner sep=0pt, font=\bfseries] (sp-label) {$\stree'$};
\draw[highlight_color] (tr) -- (tr-label);
% \draw[<-|, highlight_color] (s) -- (s-label);
% \draw[<-|, highlight_color] (sp) -- (sp-label);
% \draw[<-|, highlight_color] (root) -- (root-label);
%\node[above right=0.7cm of TR, highlight_color, inner sep=0pt, font=\bfseries] (tr-comment) {$\etree_\rchild$};
% \draw[<-|, highlight_color] (TR) -- (tr-comment);
\end{tikzpicture}
% \begin{tikzpicture}[thick, level distance=1.2cm, level 1/.style={sibling distance= 5cm}, level 2/.style={sibling distance=3cm}, level 3/.style={sibling distance=1.5cm}, level 4/.style={sibling distance= 1cm}, every child/.style={black}]
% \node[tree_node](root) {$\boldsymbol{+}$}
% child[red]{node[tree_node](tl) {$\boldsymbol{\times}$}
% child{node[tree_node] {$\boldsymbol{+}$}
% child{node[tree_node]{$\boldsymbol{x_1}$} }
% child{node[tree_node] {$\boldsymbol{x_2}$}}
% }
% child{node[tree_node] {$\boldsymbol{+}$}
% child{node[tree_node] {$\boldsymbol{x_1}$}}
% %child[missing]{node[tree_node] {$\boldsymbol{1}$}}
% child[red]{node[tree_node] {$\boldsymbol{\times}$}
% child{node[tree_node] {$\boldsymbol{-1}$}}
% child{node[tree_node] {$\boldsymbol{x_2}$}}
% }
% }
% }
% child{node[tree_node] {$\boldsymbol{\times}$} edge from parent [red]
% child{node[tree_node] {$\boldsymbol{x_2}$}}
% child{node[tree_node] {$\boldsymbol{x_2}$}}
% };
% \node[font=\bfseries, red] at (-2.8, -0.2) {$\etree_\lchild.\wght \boldsymbol{= \frac{4}{5} } $};
% \end{tikzpicture}
\caption{Weights computed by $\onepass$ in ~\cref{example:one-pass}.}
\label{fig:expr-tree-T-wght}
\end{figure}
\subsubsection{Correctness of Algorithm ~\ref{alg:one-pass}}
\begin{proof}[Proof of Lemma ~\ref{lem:one-pass}]
We prove the first part of lemma ~\ref{lem:one-pass}, i.e., correctness, by structural induction over the depth $d$ of the binary tree $\etree$.
For the base case, $d = 0$, it is the case that the node is a leaf and therefore by definition ~\ref{def:express-tree} must be a variable or coefficient. When it is a variable, \textsc{OnePass} returns $1$, and we have in this case that $\polyf(\etree) = X_i = \polyf(\abs{\etree})$ for some $i$ in $[\numvar]$, and this evaluated at all $1$'s indeed gives $1$, verifying the correctness of the returned value of $\abs{\etree}(1,\ldots, 1) = 1$. When the root is a coefficient, the absolute value of the coefficient is returned, which is indeed $\abs{\etree}(1,\ldots, 1)$. This proves the base case.
Let the inductive hypothesis be the assumption that for $d \leq k$ for $k \geq 1$, lemma ~\ref{lem:one-pass} is true for algorithm ~\ref{alg:one-pass}.
Now prove that lemma ~\ref{lem:one-pass} holds for $k + 1$. Notice that $\etree$ has at most two children, $\etree_\lchild$ and $\etree_\rchild$. Note also, that for each child, it is the case that $d \leq k$. Then, by inductive hypothesis, lemma ~\ref{lem:one-pass} holds for each existing child, and we are left with two possibilities for $\etree$. The first case is when $\etree$ is a $+$ node. When this happens, algorithm ~\ref{alg:one-pass} computes $|T_\lchild|(1,\ldots, 1) + |T_\rchild|(1,\ldots, 1)$ on line ~\ref{alg:one-pass-plus-add} which by definition is $\abs{\etree}(1,\ldots, 1)$ and hence the inductive hypothesis holds in this case. For the distribution of the children of $+$, algorithm ~\ref{alg:one-pass} computes $P(\etree_i) = \frac{|T_i|(1,\ldots, 1)}{|T_\lchild|(1,\ldots, 1) + |T_\rchild|(1,\ldots, 1)}$ which is indeed the case. The second case is when the $\etree.\val = \times$. By inductive hypothesis, it is the case that both $\abs{\etree_\lchild}\polyinput{1}{1}$ and $\abs{\etree_\rchild}\polyinput{1}{1}$ have been correctly computed. On line~\ref{alg:one-pass-times-product} algorithm ~\ref{alg:one-pass} then computes the product of the subtree partial values, $|T_\lchild|(1,\ldots, 1) \times |T_\rchild|(1,\ldots, 1)$ which by definition is $\abs{\etree}(1,\ldots, 1)$.
%That $\onepass$ makes exactly one traversal of $\etree$ follows by noting for lines ~\ref{alg:one-pass-equality1} and ~\ref{alg:one-pass-equality2} are the checks for the non-base cases, where in each matching exactly one recursive call is made on each of $\etree.\vari{children}$. For the base cases, lines ~\ref{alg:one-pass-equality3} and ~\ref{alg:one-pass-equality4} both return values without making any further recursive calls. Since all nodes are covered by the cases, and the base cases cover only leaf nodes, it follows that algorithm ~\ref{alg:one-pass} then terminates after it visits every node exactly one time.
To conclude, note that when $\etree.\type = +$, the computation of $\etree_\lchild.\wght$ and $\etree_\rchild.\wght$ are solely dependent on the correctness of $\abs{\etree}\polyinput{1}{1}$, $\abs{\etree_\lchild}\polyinput{1}{1}$, and $\abs{\etree_\rchild}\polyinput{1}{1}$, which have already been argued to be correct.
\paragraph{Run-time Analysis}
The runtime for \textsc{OnePass} is fairly straightforward. Note that line ~\ref{alg:one-pass-equality1}, ~\ref{alg:one-pass-equality2}, and ~\ref{alg:one-pass-equality3} give a constant number of equality checks per node. Then, for $+$ nodes, lines ~\ref{alg:one-pass-plus-add} and ~\ref{alg:one-pass-plus-prob} (note there is a \textit{constant} factor of $2$ here) perform a constant number of arithmetic operations, while ~\ref{alg:one-pass-plus-assign1} ~\ref{alg:one-pass-plus-assign2}, and ~\ref{alg:one-pass-plus-assign3} all have $O(1)$ assignments. Similarly, when a $\times$ node is visited, lines \ref{alg:one-pass-times-assign1}, \ref{alg:one-pass-times-assign2}, and \ref{alg:one-pass-times-assign3} have $O(1)$ assignments, while line ~\ref{alg:one-pass-times-product} has $O(1)$ product operations per node. For leaf nodes, ~\cref{alg:one-pass-leaf-assign1} and ~\cref{alg:one-pass-global-assign} are both $O(1)$ assignments.
Thus, the algorithm visits each node of $\etree$ one time, with a constant number of operations for all of the $+$, $\times$, and leaf nodes, leading to a runtime of $O\left(\treesize(\etree)\right)$, and completes the proof.
\end{proof}
\qed
\subsection{Sample Algorithm}
Algorithm ~\ref{alg:sample} takes $\etree$ as input, samples an arbitrary $(\monom, \coef)$ from $\expandtree{\etree}$ with probabilities $\stree_\lchild.\wght$ and $\stree_\rchild.\wght$ for each subtree $\stree$ with $\stree.\type = +$, outputting the tuple $(\monom, \sign(\coef))$. While one cannot compute $\expandtree{\etree}$ in time better than $O(N^k)$, the algorithm, similar to \textsc{OnePass}, uses a technique on $\etree$ which produces a sample from $\expandtree{\etree}$ without ever materializing $\expandtree{\etree}$.
Algorithm ~\ref{alg:sample} selects a monomial from $\expandtree{\etree}$ by the following top-down traversal. For a parent $+$ node, a subtree is chosen over the previously computed weighted sampling distribution. When a parent $\times$ node is visited, both children are visited. All variable leaf nodes of the subgraph traversal are added to a set. Additionally, the product of signs over all coefficient leaf nodes of the subgraph traversal is computed. The algorithm returns a set of the distinct variables of which the monomial is composed and the monomial's sign.
\begin{Definition}[TreeSet]
A TreeSet is a data structure whose elements form a set, each of which are stored in a binary tree.
\end{Definition}
Note that as stated, a TreeSet then facilitates logarithmic insertion.
\subsubsection{Pseudo Code}
See algorithm ~\ref{alg:sample} for the details of $\sampmon$ algorithm.
\begin{algorithm}
\caption{\sampmon(\etree)}
\label{alg:sample}
\begin{algorithmic}[1]
\Require \etree: Binary Expression Tree
\Ensure \vari{vars}: TreeSet
\Ensure \vari{sgn} $\in \{-1, 1\}$
\Comment{Algorithm ~\ref{alg:one-pass} should have been run before algorithm ~\ref{alg:sample}}
\State $\vari{vars} \gets \emptyset$ \label{alg:sample-global1}
\If{$\etree.\type = +$}\Comment{Sample at every $+$ node}
\State $\etree_{\vari{samp}} \gets$ Sample from left subtree ($\etree_{\lchild}$) and right subtree ($\etree_{\rchild}$) w.p. $\etree_\lchild.\wght$ and $\etree_\rchild.\wght$. \label{alg:sample-plus-bsamp}
\State $(\vari{v}, \vari{s}) \gets \sampmon(\etree_{\vari{samp}})$\label{alg:sample-plus-traversal}
% \State $\vari{vars} \gets \vari{vars} \;\cup \;\{\vari{v}\}$\label{alg:sample-plus-union}
% \State $\vari{sgn} \gets \vari{sgn} \times \vari{s}$\label{alg:sample-plus-product}
\State $\Return ~(\vari{v}, \vari{s})$
\ElsIf{$\etree.\type = \times$}\Comment{Multiply the sampled values of all subtree children}
\State $\vari{sgn} \gets 1$\label{alg:sample-global2}
\For {$child$ in $\etree.\vari{children}$}
\State $(\vari{v}, \vari{s}) \gets \sampmon(child)$
\State $\vari{vars} \gets \vari{vars} \cup \{\vari{v}\}$\label{alg:sample-times-union}
\State $\vari{sgn} \gets \vari{sgn} \times \vari{s}$\label{alg:sample-times-product}
\EndFor
\State $\Return ~(\vari{vars}, \vari{sgn})$
\ElsIf{$\etree.\type = numeric$}\Comment{The leaf is a coefficient}
%\State $\vari{sgn} \gets \vari{sgn} \times sign(\etree.\val)$
\State $\Return ~\left(\{\}, sign(\etree.\val)\right)$\label{alg:sample-num-return}
\ElsIf{$\etree.\type = var$}
%\State $\vari{vars} \gets \vari{vars} \; \cup \; \{\;\etree.\val\;\}\label{alg:sample-var-union}$\Comment{Add the variable to the set}
\State $\Return~\left(\{\etree.\val\}, 1\right) $\label{alg:sample-var-return}
\EndIf
\end{algorithmic}
\end{algorithm}
\subsubsection{Correctness of Algorithm ~\ref{alg:sample}}
\begin{proof}[Proof of Lemma ~\ref{lem:sample}]
First, we need to show that $\sampmon$ indeed returns a monomial $\monom$, such that $(\monom, \coef)$ is in $\expandtree{\etree}$.
For the base case of the depth $d$ of $\etree$ is $0$, we have that the root node is either a constant $\coef$, in which case by lines ~\ref{alg:sample-global1} and ~\ref{alg:sample-num-return} we return $\{~\}$, or we have that $\etree.\type = \var$ and $\etree.\val = x$, in which case by line ~\ref{alg:sample-var-return} we return $\{x\}$. Both cases satisfy the definition of a monomial, and the base case is proven.
By inductive hypothesis, assume that for $d \leq k$ for $k \geq 0$, that it is indeed the case that $\sampmon$ returns a monomial.
For the inductive step, let us take a tree $\etree$ with $d = k + 1$. Note that each child has depth $d \leq k$, and by inductive hypothesis both of them return a valid monomial. Then the root can be either a $+$ or $\times$ node. For the case of a $+$ root node, line ~\ref{alg:sample-plus-bsamp} of $\sampmon$ will choose one of the children of the root. Since by hypothesis it is the case that a monomial is being returned from either child, and only one of these monomials is selected, we have for the case of $+$ root node that a valid monomial is returned by $\sampmon$. When the root is a $\times$ node, lines ~\ref{alg:sample-times-union} and ~\ref{alg:sample-times-product} multiply the monomials returned by the two children of the root, and by definition ~\ref{def:monomial} the product of two monomials is also a monomial, which means that $\sampmon$ returns a valid monomial for the $\times$ root node, thus concluding the fact that $\sampmon$ indeed returns a monomial.
%Note that for any monomial sampled by algorithm ~\ref{alg:sample}, the nodes traversed form a subgraph of $\etree$ that is \textit{not} a subtree in the general case. We thus seek to prove that the subgraph traversed produces the correct probability corresponding to the monomial sampled.
We seek to prove by induction on the depth $d$ of $\etree$ that the subgraph traversed by $\sampmon$ has a probability that is in accordance with the monomial sampled, $\frac{|\coef|}{\abs{\etree}\polyinput{1}{1}}$.
For the base case $d = 0$, by definition ~\ref{def:express-tree} we know that the root has to be either a coefficient or a variable. For either case, the probability of the value returned is $1$ since there is only one value to sample from. When the root is a variable $x$ the algorithm correctly returns $(\{x\}, 1 )$. When the root is a coefficient, \sampmon ~correctly returns $(\{~\}, sign(\coef_i) \times 1)$.
For the inductive hypothesis, assume that for $d \leq k$ and $k \geq 0$ $\sampmon$ indeed samples $\monom$ in $(\monom, \coef)$ in $\expandtree{\etree}$ with probability $\frac{|\coef|}{\abs{\etree}\polyinput{1}{1}}$.%bove is true.%lemma ~\ref{lem:sample} is true.
Prove now, that when $d = k + 1$ the correctness holds. It is the case that the root of $\etree$ has up to two children $\etree_\lchild$ and $\etree_\rchild$. Since $\etree_\lchild$ and $\etree_\rchild$ are both depth $d \leq k$, by inductive hypothesis correctness holds for both of them, thus, $\sampmon$ has sampled both monomials $\monom_\lchild$ in $(\monom_\lchild, \coef_\lchild)$ of $\expandtree{\etree_\lchild}$ and $\monom_\rchild$ in $(\monom_\rchild, \coef_\rchild)$ of $\expandtree{\etree_\rchild}$, from $\etree_\lchild$ and $\etree_\rchild$ with probability $\frac{|\coef_\lchild|}{\abs{\etree_\lchild}\polyinput{1}{1}}$ and $\frac{|\coef_\rchild|}{\abs{\etree_\rchild}\polyinput{1}{1}}$.
Then the root has to be either a $+$ or $\times$ node.
Consider the case when the root is $\times$. Note that we are sampling a term from $\expandtree{\etree}$. Consider $(\monom, \coef)$ in $\expandtree{\etree}$, where $\monom$ is the sampled monomial. Notice also that it is the case that $\monom = \monom_\lchild \times \monom_\rchild$, where $\monom_\lchild$ is coming from $\etree_\lchild$ and $\monom_\rchild$ from $\etree_\rchild$. The probability that \sampmon$(\etree_{\lchild})$ returns $\monom_\lchild$ is $\frac{|\coef_{\monom_\lchild}|}{|\etree_\lchild|(1,\ldots, 1)}$ and $\frac{|\coef_{\monom_\rchild}|}{\abs{\etree_\rchild}\polyinput{1}{1}}$ for $\monom_\rchild$. Since both $\monom_\lchild$ and $\monom_\rchild$ are sampled with independent randomness, the final probability for sample $\monom$ is then $\frac{|\coef_{\monom_\lchild}| \cdot |\coef_{\monom_\rchild}|}{|\etree_\lchild|(1,\ldots, 1) \cdot |\etree_\rchild|(1,\ldots, 1)}$. For $(\monom, \coef)$ in \expandtree{\etree}, it is indeed the case that $|\coef| = |\coef_{\monom_\lchild}| \cdot |\coef_{\monom_\rchild}|$ and that $\abs{\etree}(1,\ldots, 1) = |\etree_\lchild|(1,\ldots, 1) \cdot |\etree_\rchild|(1,\ldots, 1)$, and therefore $\monom$ is sampled with correct probability $\frac{|\coef|}{\abs{\etree}(1,\ldots, 1)}$.
For the case when $\etree.\val = +$, \sampmon ~will sample monomial $\monom$ from one of its children. By inductive hypothesis we know that any $\monom_\lchild$ in $\expandtree{\etree_\lchild}$ and any $\monom_\rchild$ in $\expandtree{\etree_\rchild}$ will both be sampled with correct probability $\frac{|\coef_{\monom_\lchild}|}{|\etree_{\lchild}|(1,\ldots, 1)}$ and $\frac{|\coef_{\monom_\rchild}|}{|\etree_\rchild|(1,\ldots, 1)}$, where either $\monom_\lchild$ or $\monom_\rchild$ will equal $\monom$, depending on whether $\etree_\lchild$ or $\etree_\rchild$ is sampled. Assume that $\monom$ is sampled from $\etree_\lchild$, and note that a symmetric argument holds for the case when $\monom$ is sampled from $\etree_\rchild$. Notice also that the probability of sampling $\etree_\lchild$ from $\etree$ is $\frac{\abs{\etree_\lchild}\polyinput{1}{1}}{\abs{\etree_\lchild}\polyinput{1}{1} + \abs{\etree_\rchild}\polyinput{1}{1}}$ as computed by $\onepass$. Then, since $\sampmon$ goes top-down, and each sampling choice is independent (which follows from the randomness in the root of $\etree$ being independent from the randomness used in its subtrees), the probability for $\monom$ to be sampled from $\etree$ is equal to the product of the probability that $\etree_\lchild$ is sampled from $\etree$ and $\monom$ is sampled in $\etree_\lchild$, and
\begin{align*}
P(\sampmon(\etree) = \monom) = &P(\sampmon(\etree_\lchild) = \monom) \cdot P(SampledChild(\etree) = \etree_\lchild)\\
= &\frac{|\coef_\monom|}{|\etree_\lchild|(1,\ldots, 1)} \cdot \frac{\abs{\etree_\lchild}(1,\ldots, 1)}{|\etree_\lchild|(1,\ldots, 1) + |\etree_\rchild|(1,\ldots, 1)}\\
= &\frac{|\coef_\monom|}{\abs{\etree}(1,\ldots, 1)},
\end{align*}
and we obtain the desired result.
\paragraph{Run-time Analysis}
We now bound the number of recursive calls in $\sampmon$ by $O\left(k\cdot depth(\etree)\right)$. Take an arbitrary sample subgraph of expression tree $\etree$ of degree $k$ and pick an arbitrary level $i$. Call the number of $\times$ nodes in this level $y_i$, and the total number of nodes $x_i$. Given that both children of a $\times$ node are traversed in $\sampmon$ while only one child is traversed for a $+$ parent node, note that the number of nodes on level $i + 1$ in the general case is at most $y_i + x_i$, and the increase in the number of nodes from level $i$ to level $i + 1$ is upper bounded by $x_{i + 1} - x_i \leq y_i$.
Now, we prove by induction on the depth $d$ of tree $\etree$ the following claim.
\begin{Claim}\label{claim:num-nodes-level-i}
The number of nodes in expression tree $\etree$ at arbitrary level $i$ is bounded by one more than the number of $\times$ nodes in levels $[0, i - 1]$.
\end{Claim}
\begin{proof}[Proof of Claim ~\ref{claim:num-nodes-level-i}]
For the base case, $d = 0$, we have the following cases. For both cases, when $\etree.\type = \tnum$ and when $\etree.\type = \var$, it is trivial to see that the number of nodes on level $0$ = 1, which satisfies the identity of ~\cref{claim:num-nodes-level-i}, i.e., the number of $\times$ nodes in previous levels $+ 1$ = 1, and the base case is upheld.
Assume that for $d \leq k$ for $k \geq 0$ that ~\cref{claim:num-nodes-level-i} holds.
The inductive step is to show that for arbitrary $\etree$ with depth $d = k + 1$ the claim still holds. Note that we have two possibilities for the value of $\etree$. First, $\etree.\type = +$, and it is the case in ~\cref{alg:sample-plus-traversal} that only one of $\etree_\lchild$ or $\etree_\rchild$ are part of the subgraph traversed by $\sampmon$. By inductive hypothesis, both subtrees satisfy the claim. Since only one child is part of the subgraph, there is exactly one node at level 1, which, as in the base case analysis, satisfies ~\cref{claim:num-nodes-level-i}. For the second case, $\etree.\type = \times$, $\sampmon$ traverses both children, and the number of nodes at level $1$ in the subgraph is then $2$, which satisfies ~\cref{claim:num-nodes-level-i} since the sum of $\times$ nodes in previous levels (level $0$) is $1$, and $1 + 1 = 2$, proving the claim.
\end{proof}
\qed
By ~\cref{def:degree}, a sampled monomial will have $O(k)$ $\times$ nodes, and this implies $O(k)$ nodes at $\leq$ $depth(\etree)$ levels of the $\sampmon$ subgraph, bounding the number of recursive calls to $O(k \cdot depth(\etree))$.
Globally, lines ~\ref{alg:sample-global1} and ~\ref{alg:sample-global2} are $O(1)$ time. For a $+$ node, line ~\ref{alg:sample-plus-bsamp} has $O(1)$ time by the fact that $\etree$ is binary. For a $\times$ node, line ~\ref{alg:sample-times-union} has $O(\log{k})$ time by nature of the TreeSet data structure and the fact that by definition any monomial sampled from $\expandtree{\etree}$ has degree $\leq k$ and hence at most $k$ distinct variables, which in turn implies that the TreeSet has $\leq k$ elements in it at any time, while line ~\ref{alg:sample-times-product} is in $O(1)$ for a product and an assignment operation.
The returns for leaf nodes on lines ~\ref{alg:sample-num-return} and ~\ref{alg:sample-var-return} are likewise $O(1)$ operations. Thus for each node visited, we have $O(\log{k})$ runtime, and the final runtime for $\sampmon$ is $O(\log{k} \cdot k \cdot depth(\etree))$.
\end{proof}
\qed
\AH{\large\bf{New stuff 092520.}}
\begin{Claim}\label{claim:constpk-TI}
Given a positive query polynomial $\poly$ over a $\ti$, with constant $\prob$ such that there exists a $\prob_0$ where for all $\prob_i, \prob_0 \leq \prob_i$, and constant $k = \degree(\poly)$, the ratio $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\prob_1,\ldots, \prob_\numvar)}$ is constant.
\end{Claim}
\begin{proof}[Proof of Claim ~\ref{claim:constpk-TI}]
By independence, a $\ti$ has the property that all of its annotations are positive. Combined with the fact that ~\cref{claim:constpk-TI} uses only positive queries, i.e., queries that only use $\oplus$ and $\otimes$ semiring operators over its polynomial annotations, it is the case that no negation exists pre or post query.
For any $\poly$ then, it is true that all coefficients in $\abs{\etree}(1,\ldots, 1)$ are positive and thus the same as their $\rpoly$ counterparts. This then implies that the ratio $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\prob_1,\ldots, \prob_\numvar)} \leq \frac{\abs{\etree}(1,\ldots, 1)}{\abs{\etree}(1,\ldots, 1) \cdot \prob_0^k}$, which is indeed a constant.
\end{proof}
\qed
\subsection{$\rpoly$ over $\bi$}
\AH{A general sufficient condition is the $\bi$ having fixed block size (thus implying increasing number of blocks for growing $\numvar$). For increasing $\numvar$, the ratio $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\prob_1,\ldots, \prob_\numvar)}$ can be proven to be a constant since, as $\numvar$ increases, it has to be the case that new blocks are added, and this results in a constant number of terms cancelled out by $\rpoly$, with the rest surviving, which gives us a constant $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\prob_1,\ldots, \prob_\numvar)}$.
\par In the general case, with fixed number of blocks and growing $\numvar$, all additional terms will be cancelled out by $\rpoly$ while for $\abs{\etree}(1,\ldots, 1)$ it is the case that it will grow exponentially with $\numvar$, yielding a ratio $\frac{O(2^\numvar)}{O(1)}$ and (as will be seen) greater.}
\subsubsection{Known Reduction Result $\bi \mapsto \ti$}
Denote an arbitrary $\bi$ as $\bipdb = (\bipd, \biwset)$ and a constructed $\ti$ to be $\tipdb = (\tipd, \tiwset)$, the details to be described next.
It is well known that $\bipdb$ can be reduced to a query $\poly$ over $\tipdb$. For completeness, let us describe the reduction.
Let tuples in $\bipdb$ be denoted $a_{\block, i}$ and their $\tipdb$ counterparts as $x_{\block, i}$, where $\block$ represents the block id in which $a_{\block, i}$ resides.
\begin{Theorem}\label{theorem:bi-red-ti}
For any $\bipdb$, there exists a query $\poly$ and $\tipdb$ such that $\poly(\tiwset)$ over distribution $\tipd$ outputs elements in $\biwset$ according to their respective probabilities in $\bipd$.
\end{Theorem}
\begin{Definition}[Total Ordering $\biord$]\label{def:bi-red-ti-order}
The order $\biord$ is a fixed total order across all tuples in block $\block$ of $\bipdb$.
\end{Definition}
\begin{Definition}[Query $\poly$]\label{def:bi-red-ti-q}
$\poly$ is constructed to map all possible worlds of $\db_{ti} \in \tiwset$ for which $x_i$ is the greatest according to $\biord$, to the worlds $\vct{w}$ in $\biwset$ in which $a_{\block, i}$ is present and $\bipd(\vct{w}) > 0$. Recall the constraint on $\bipdb$ to be that if $a_{\block, i}$ is present, then it is the case that for all $j \neq i$, tuple $a_{\block, j}$ is not present. For $\bipdb$ with exactly one block, all such worlds $\db_{ti}$ are mapped to the world $\{a_i\}$.
\end{Definition}
For simplicity, we will consider $\bipdb$ to consist of one block $\block$. By independence of blocks in $\bi$, the proofs below immediately generalize to the case of $\bipdb$ with multiple blocks\textcolor{blue}{...umm, we'll see, we may need to argue this}.
The reduction consists of the construction of a query $\poly$ and $\tipdb$ such that $\poly$ is computed over $\tipdb$. To construct the $\tipdb$ given an arbitrary $\bipdb$ a tuple alternative $a_{\block, i}$ is transcribed to a tuple in $\tipdb$ with probability
\begin{equation}
P(x_{b, i}) = \begin{cases}
\frac{P(a_{\block, i})}{\prod_{j = 1}^{i - 1}(1 - P(x_{\block, j}))} &\textbf{if }i > 1\\
P(a_i) &\textbf{if } i = 1.
\end{cases}\label{eq:bi-red-ti-func}
\end{equation}
The above is more simply written as
\begin{equation*}
\tipd(x_{\block, i}) = \frac{P(a_{\block, i})}{1 - \sum_{j = 1}^{i - 1} P(a_{\block, j})}
\end{equation*}
The above mapping is applied across all tuples of $\bipdb$.
This method for computing the probabilities of the tuples in $\tipdb$ allows for the following. According to $\biord$, the powerset of possible worlds is mapped in such a way that the first ordered tuple appearing in a possible world $\db_{\tiabb}$ of $\tiwset$ has that world mapped to the world $\db_{\biabb} \in \biwset$ where $a_{\block, i}$ is present with $\bipd(\db_{\biabb}) > 0$. Recall that since we are considering a $\bi$ with one block, there is only one such world in $\biwset$.
\begin{Lemma}\label{lem:bi-red-ti-prob}
The sum of the probabilities of all $\db_{\tiabb} \in \tiwset$ database worlds mapped to a given tuple $x_{b, i}$ equals the probability of the tuple $a_{\block, i}$ in the original $\bipdb$.
\end{Lemma}
\begin{proof}[Proof of Lemma ~\ref{lem:bi-red-ti-prob}]
The proof is by induction. Given a tuple $a_{\block, i}$ in $\bipdb$ such that $1 \leq i \leq \abs{b}$, (where $\abs{b}$ denotes the number of alternative tuples in block $\block$), by ~\cref{eq:bi-red-ti-func} $P(x_{\block, i}) = \frac{P(a_{\block, i})}{1 \cdot \prod_{j = 1}^{i - 1} (1 - P(x_{\block, j}))}$.
For the base case, we have that $i = 1$ which implies that $P(x_{\block, i}) = P(a_{\block, i})$ and the base case is satisfied.
%Other neat tidbits include that $\abs{b} = 1$, the set $b = \{a_1\}$, and the powerset $2^b = \{\emptyset, \{1\}\} = \tiwset$. For coolness, also see that $P(\neg x_i) = 1 - P(x_i) = 1 - P(a_i) = \emptyset$, so there is, in this case, a one to one correspondence of possible worlds and their respective probabilities in both $\ti$ and $\bi$, but this is extraneous information for the proof.
The hypothesis is then that for $k \geq 1$ tuple alternatives, ~\cref{lem:bi-red-ti-prob} holds.
For the inductive step, prove that ~\cref{lem:bi-red-ti-prob} holds for $k + 1$ alternatives. By definition of the query $\poly$ (~\cref{def:bi-red-ti-q}), it is a fact that only the world $\wElem_{x_{\block, k + 1}} = \{x_{\block, k + 1}\}$ in the set of possible worlds is mapped to $\bi$ world $\{a_{\block, k + 1}\}$. Then for world $\wElem_{x_{\block, k + 1}}$ it is the case that $P(\wElem_{x_{\block, k + 1}}) = \prod_{j = 1}^{k} (1 - P(x_{\block, j})) \cdot P(x_{\block, k + 1})$. Since by ~\cref{eq:bi-red-ti-func} $P(x_{\block, k + 1}) = \frac{P(a_{\block, k + 1})}{\prod_{j = 1}^{k}(1 - P(x_{\block, j}))}$, we get
\begin{align*}
P(\wElem_{x_{\block, k + 1}}) =& \prod_{j = 1}^{k} (1 - P(x_{\block, j})) \cdot P(x_{\block, k + 1})\\
=&\prod_{j = 1}^{k} (1 - P(x_{\block, j})) \cdot \frac{P(a_{\block, k + 1})}{\prod_{j = 1}^{k}(1 - P(x_{\block, j}))}\\
=&P(a_{\block, k + 1}).
\end{align*}
\end{proof}
\qed
This leaves us with the task of constructing a query $\poly$ over $\tipdb$ to perform the desired mapping of possible worlds. Setting $\poly$ to the following query yields the desired result.
\begin{lstlisting}
SELECT A FROM TI as a
WHERE A = 1
OR A = 2 AND NOT EXISTS(SELECT A FROM TI as b
                        WHERE A = 1 AND a.blockID = b.blockID)
$\vdots$
OR A = $|$b.blockID$|$ AND NOT EXISTS(SELECT A FROM TI as b
                        WHERE (A = 1 OR A = 2 $\ldots$ A = $|$b.blockID$|$ - 1) AND a.blockID = b.blockID)
\end{lstlisting}
\begin{Lemma}\label{lem:bi-red-ti-q}
The query $\poly$ satisfies the requirements of ~\cref{def:bi-red-ti-q}.
\end{Lemma}
\begin{proof}[Proof of Lemma ~\ref{lem:bi-red-ti-q}]
For any possible world in $2^b$, notice that the WHERE clause selects the tuple with the greatest ordering in the possible world. For all other tuples, disjunction of predicates dictates that no other tuple will be in the output by mutual exclusivity of the disjunction. Thus, it is the case for any $\ti$ possible world, that the tuple $x_{\block, i}$ with the greatest ordering appearing in that possible world will alone be in the output, and all such possible worlds with $x_{\block, i}$ as the greatest in the ordering will output the same world corresponding to the $\bi$ world for the disjoint tuple $a_{\block, i}$.
\end{proof}
\qed
\begin{proof}[Proof of Theorem ~\ref{theorem:bi-red-ti}]
For multiple blocks in $\bipdb$, note that the above reduction to $\poly(\tipdb)$ with multiple 'blocks' will behave the same as $\bipdb$ since the property of independence for $\ti$ ensures that all tuples in the $\ti$ will have the same marginal probability across all possible worlds as their tuple probability, regardless of how many tuples and, thus, worlds the $\tipdb$ has. Note that this property is unchanging no matter what probabilities additional tuples in $\tipdb$ are assigned.
To see this consider the following.
\begin{Lemma}\label{lem:bi-red-ti-ind}
For any set of independent variables $S$ with size $\abs{S}$, when adding another distinct independent variable $y$ to $S$ with probability $\prob_y$, it is the case that the probability of each variable $x_i$ in $S$ remains unchanged.
\AH{This may be a well known property that I might not even have the need to prove, but since I am not certain, here goes.}
\end{Lemma}
\begin{proof}[Proof of Lemma ~\ref{lem:bi-red-ti-ind}]
The proof is by induction. For the base case, consider a set of one element $S = \{x\}$ with probability $\prob_x$. The set of possible outcomes includes $2^S = \{\emptyset, \{x\}\}$, with $P(\emptyset) = 1 - \prob_x$ and $P(x) = p_x$. Now, consider $S' = \{y\}$ with $P(y) = \prob_y$ and $S \cup S' = \{x, y\}$ with the set of possible outcomes now $2^{S \cup S'} = \{\emptyset, \{x\}, \{y\}, \{x, y\}\}$. The probabilities for each world then are $P(\emptyset) = (1 - \prob_x)\cdot(1 - \prob_y), P(x) = \prob_x \cdot (1 - \prob_y), P(y) = (1 - \prob_x)\cdot \prob_y$, and $P(xy) = \prob_x \cdot \prob_y$. For the worlds where $x$ appears we have
\[P(x) + P(xy) = \prob_x \cdot (1 - \prob_y) + \prob_x \cdot \prob_y = \prob_x \cdot \left((1 - \prob_y) + \prob_y\right) = \prob_x \cdot 1 = \prob_x.\]
Thus, the base case is satisfied.
For the hypothesis, assume that $\abs{S} = k$ for some $k \geq 1$, and for $S'$ such that $\abs{S'} = 1$ where its element is distinct from all elements in $S$, the probability of each independent variable in $S$ is the same in $S \cup S'$.
For the inductive step, let us prove that for $\abs{S_{k + 1}} = k + 1$ elements, adding another element will not change the probabilities of the independent variables in $S$. By the hypothesis applied to $S_k \cup S_{k + 1}$, all probabilities in $S_k$ remained untainted after the union. Now consider a set $S' = \{z\}$ and the union $S_{k + 1} \cup S'$. Since all variables are distinct and independent, it is the case that the set of possible outcomes of $S_{k + 1} \cup S' = 2^{S_{k + 1} \cup S'}$ with $\abs{2^{S_{k + 1} \cup S'}} = 2^{\abs{S_{k + 1}} + \abs{S'}}$ since $\abs{S_{k + 1}} + \abs{S'} = \abs{S_{k + 1} \cup S'}$. Then, since $2^{\abs{S_{k + 1}} + \abs{S'}} = 2^{\abs{S_{k + 1}}} \cdot 2^{\abs{S'}}$, and $2^{S'} = \{\emptyset, \{z\}\}$, it is the case that all elements in the original set of outcomes will appear \textit{exactly one} time without $z$ and \textit{exactly one} time with $z$, such that for element $x \in 2^{S_{k + 1}}$ with probability $\prob_x$ we have $P(x\text{ }OR\text{ }xz) = \prob_x \cdot (1 - \prob_z) + \prob_x \cdot \prob_z = \prob_x\cdot \left((1 - \prob_z) + \prob_z\right) = \prob_x \cdot 1 = \prob_x$, and the probabilities remain unchanged, and, thus, the marginal probabilities for each variable in $S_{k + 1}$ across all possible outcomes remain unchanged.
\end{proof}
\qed
The repeated application of ~\cref{lem:bi-red-ti-ind} to any 'block' of independent variables in $\tipdb$ provides the same result as joining two sets of distinct elements of size $\abs{S_1}, \abs{S_2} > 1$.
Thus, by lemmas ~\ref{lem:bi-red-ti-prob}, ~\ref{lem:bi-red-ti-q}, and ~\ref{lem:bi-red-ti-ind}, the proof follows.
\end{proof}
\qed
\subsubsection{General results for $\bi$}\label{subsubsec:bi-gen}
\AH{One thing I don't see in the argument below is that as $\numvar \rightarrow \infty$, we have that $\prob_0 \rightarrow 0$.}
The general results of approximating a $\bi$ using the reduction and ~\cref{alg:mon-sam} do not allow for the ratio $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\prob_1,\ldots, \prob_\numvar)}$ to be a constant. Consider the following example.
Let monomial $y_i = P(x_i) \cdot \prod_{j = 1}^{i - 1}(1 - P(x_j))$, and let $\poly(\vct{X}) = \sum_{i = 1}^{\numvar}y_i$. Note that this query output can exist on a projection for which each tuple agrees on the projected values of the query in a $\bi$ consisting of one block and $\numvar$ tuples.
First, let's analyze the numerator $\abs{\etree}(1,\ldots, 1)$. Expanding $\abs{\etree}$ yields $X_1 + (1 + X_1)\cdot X_2 + \cdots + (1 + X_1)\cdot(1 + X_2)\cdots(1 + X_{\numvar - 1})\cdot X_\numvar$, which, evaluated at $(1,\ldots, 1)$, yields the geometric series $S_{\abs{\etree}} = 2^0 + 2^1 +\cdots+2^{\numvar - 1}$. We can perform the following manipulations to obtain the following closed form.
\begin{align*}
2 \cdot S_{\abs{\etree}} =& 2^1 +\cdots+2^\numvar = 2^{\numvar} + S_{\abs{\etree}} - 1 \\
S_{\abs{\etree}} =& 2^{\numvar} - 1
\end{align*}
So, then $\abs{\etree}(1,\ldots, 1) = 2^{\numvar} - 1$.
On the other hand, considering $\rpoly(\prob_1,\ldots, \prob_\numvar)$, since we are simply summing up the probabilities of one block of disjoint tuples (recall that $P(x_i) = \frac{P(a_i)}{1\cdot\prod_{j = 1}^{i - 1}(1 - P(x_j))}$ in the reduction for $a_i$ the original $\bi$ probability), it is the case that $\rpoly(\prob_1,\ldots, \prob_\numvar) \leq 1$, and the ratio $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\prob_1,\ldots, \prob_\numvar)}$ in this case is exponential $O(2^\numvar)$. Further note that setting $\poly(\vct{X}) = \sum_{i = 1}^{\numvar} y_i^k$ will yield an $O(2^{\numvar \cdot k})$ bound.
\subsubsection{Sufficient Condition for $\bi$ for linear time Approximation Algorithm}
Let us introduce a sufficient condition on $\bipdb$ for a linear time approximation algorithm.
\AH{Lemma ~\ref{lem:bi-suf-cond} is not true for the case of $\sigma$, where a $\sigma(\bowtie)$ query could select tuples from the same block, and self join them such that all tuples cancel out. We need a definition for 'safe' (in this context) queries, to prove the lemma.}
\begin{Lemma}\label{lem:bi-suf-cond}
For $\bipdb$ with fixed block size $\abs{b}$, the ratio $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\prob_1,\ldots, \prob_\numvar)}$ is a constant.
\end{Lemma}
\AH{Two observations.
\par
1) I am not sure that the argument below is correct, as I think we would still get something exponential in the numerator $\abs{\etree}(1,\ldots, 1)$.
\par2) I \textit{think} a similar argument will hold however for the method of not using the reduction.}
\begin{proof}[Proof of Lemma ~\ref{lem:bi-suf-cond}]
For increasing $\numvar$ and fixed block size $\abs{b}$ in $\bipdb$ given query $\poly = \sum_{i = 1}^{\numvar} y_i$ where $y_i = x_i \cdot \prod_{j = 1}^{i - 1} (1 - x_j)$, a query whose output is the maximum possible output, it has to be the case as seen in ~\cref{subsubsec:bi-gen} that for each block $b$, $\rpoly(\prob_{b, 1},\ldots, \prob_{b, \abs{b}}) = P(a_{b, 1}) + P(a_{b, 2}) + \cdots + P(a_{b, \abs{b}})$ for $a_i$ in $\bipdb$. As long as there exists no block in $\bipdb$ such that the sum of alternatives is $0$ (which by definition of $\bi$ should be the case), we can bound $\rpoly(\prob_1,\ldots, \prob_\numvar) \geq \frac{\prob_0 \cdot \numvar}{\abs{\block}}$ for $\prob_0 > 0$, and then we have that $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\prob_1,\ldots, \prob_\numvar)}$ is indeed a constant.
\end{proof}
\qed
Given a $\bipdb$ satisfying ~\cref{lem:bi-suf-cond}, it is the case by ~\cref{lem:approx-alg} that ~\cref{alg:mon-sam} runs in linear time.
\AH{\Large \bf{092520 -- 100220 New material.}}
\section{Algorithm ~\ref{alg:mon-sam} for $\bi$}
We may be able to get a better run time by developing a separate approximation algorithm for the case of $\bi$. Instead of performing the reduction from $\bi \mapsto \poly(\ti)$, we decide to work with the original variable annotations given to each tuple alternative in $\bipdb$. For clarity, let us assume the notation of $\bivar$ for the annotation of a tuple alternative. The algorithm yields $0$ for any monomial sampled that cannot exist in $\bipdb$ due to the disjoint property characterizing $\bi$. The semantics for $\rpoly$ change in this case. $\rpoly$ not only performs the same modding function, but also sets all monomial terms to $0$ if they contain variables which appear within the same block.
\begin{algorithm}[H]
\caption{$\approxq_{\biabb}$($\etree$, $\vct{p}$, $\conf$, $\error$, $\bivec$)}
\label{alg:bi-mon-sam}
\begin{algorithmic}[1]
\Require \etree: Binary Expression Tree
\Require $\vct{p} = (\prob_1,\ldots, \prob_\numvar)$ $\in [0, 1]^N$
\Require $\conf$ $\in [0, 1]$
\Require $\error$ $\in [0, 1]$
\Require $\bivec$ $\in [0, 1]^{\abs{\block}}$\Comment{$\abs{\block}$ is the number of blocks}
\Ensure \vari{acc} $\in \mathbb{R}$
\State $\vari{sample}_\vari{next} \gets 0$
\State $\accum \gets 0$\label{alg:mon-sam-global1}
\State $\numsamp \gets \ceil{\frac{2 \log{\frac{2}{\conf}}}{\error^2}}$\label{alg:mon-sam-global2}
\State $(\vari{\etree}_\vari{mod}, \vari{size}) \gets $ \onepass($\etree$)\label{alg:mon-sam-onepass}\Comment{$\onepass$ is ~\cref{alg:one-pass} \;and \sampmon \; is ~\cref{alg:sample}}
\For{\vari{i} \text{ in } $1\text{ to }\numsamp$}\Comment{Perform the required number of samples}
\State $(\vari{M}, \vari{sgn}_\vari{i}) \gets $ \sampmon($\etree_\vari{mod}$)\label{alg:mon-sam-sample}
\For{$\vari{x}_\vari{\block,i}$ \text{ in } $\vari{M}$}
\If{$\bivec[\block] = 1$}\Comment{If we have already had a variable from this block, $\rpoly$ drops the sample.}
\State $\vari{sample}_{\vari{next}} \gets 1$
\State break
\Else
\State $\bivec[\block] \gets 1$
% \State $\vari{sum} = 0$
% \For{$\ell \in [\abs{\block}]$}
% \State $\vari{sum} = \vari{sum} + \bivec[\block][\ell]$
% \EndFor
% \If{$\vari{sum} \geq 2$}
% \State $\vari{sample}_{\vari{next}} \gets 1$
% \State continue\Comment{Not sure for psuedo code the best way to state this, but this is analogous to C language continue statement.}
\EndIf
\EndFor
\If{$\vari{sample}_{\vari{next}} = 1$}
\State $\vari{sample}_{\vari{next}} \gets 0$
\State continue
\EndIf
\State $\vari{Y}_\vari{i} \gets 1$\label{alg:mon-sam-assign1}
\For{$\vari{x}_{\vari{j}}$ \text{ in } $\vari{M}$}%_{\vari{i}}$}
\State $\vari{Y}_\vari{i} \gets \vari{Y}_\vari{i} \times \; \vari{\prob}_\vari{j}$\label{alg:mon-sam-product2} \Comment{$\vari{p}_\vari{j}$ is the assignment to $\vari{x}_\vari{j}$ from input $\vct{p}$}
\EndFor
\State $\vari{Y}_\vari{i} \gets \vari{Y}_\vari{i} \times\; \vari{sgn}_\vari{i}$\label{alg:mon-sam-product}
\State $\accum \gets \accum + \vari{Y}_\vari{i}$\Comment{Store the sum over all samples}\label{alg:mon-sam-add}
\EndFor
\State $\vari{acc} \gets \vari{acc} \times \frac{\vari{size}}{\numsamp}$\label{alg:mon-sam-global3}
\State \Return \vari{acc}
\end{algorithmic}
\end{algorithm}
Before redefining $\rpoly$ in terms of the $\bi$ model, we need to define the notion of performing a mod operation with a set of polynomials.
\begin{Definition}[Mod with a set of polynomials]\label{def:mod-set-poly}
To mod a polynomial $\poly$ with a set $\vct{Z} = \{Z_1,\ldots Z_x\}$ of polynomials, the mod operation is performed successively on $\poly$, modding out each element of the set $\vct{Z}$ from $\poly$.
\end{Definition}
\begin{Example}\label{example:mod-set-poly}
To illustrate for $\poly = X_1^2 + X_1X_2^3$ and the set $\vct{Z} = \{X_1^2 - X_1, X_2^2 - X_2, X_1X_2\}$ we get
\begin{align*}
&X_1^2 + X_1X_2^3 \mod X_1^2 - X_1 \mod X_2^2 - X_2 \mod X_1X_2\\
=&X_1 + X_1X_2^3 \mod X_2^2 - X_2 \mod X_1X_2\\
=&X_1 + X_1X_2 \mod X_1X_2\\
=&X_1
\end{align*}
\end{Example}
\begin{Definition}[$\rpoly$ for $\bi$ Data Model]\label{def:bi-alg-rpoly}
$\rpoly(\vct{X})$ over the $\bi$ data model is redefined to include the following mod operation in addition to definition ~\ref{def:qtilde}. For every $j \neq i$, we add the operation $\mod X_{\block, i}\cdot X_{\block, j}$. For set of blocks $\mathcal{B}$ and the size of block $\block$ as $\abs{\block}$,
\[\rpoly(\vct{X}) = \poly(\vct{X}) \mod \{X_{\block, i}^2 - X_{\block, i} \st \block \in \mathcal{B}, i \in [\abs{\block}]\} \cup \{X_{\block, i}X_{\block, j} \st \block \in \mathcal{B},\, i, j \in [\abs{\block}], i \neq j\}.
% \mod X_{\block_1, 1}^2 - X_{\block_1, 1} \cdots \mod X_{\block_k, \abs{\block_k}}^2 - X_{\block_k, \abs{\block_k}} \mod X_{b_1, 1} \cdot X_{b_1, 2}\cdots \mod X_{\block_1, \abs{\block_1} -1} \cdot X_{\block, \abs{\block_1}}\cdots \mod X_{\block_k, 1} \cdot X_{\block_k, 2} \cdots \mod X_{\block_k, \abs{\block_k} - 1}\cdot X_{\block_K, \abs{\block_k}}.
\]
\end{Definition}
\subsection{Correctness}
\begin{Theorem}\label{theorem:bi-approx-rpoly-bound}
For any query polynomial $\poly(\vct{X})$, an approximation of $\rpoly(\prob_1,\ldots, \prob_\numvar)$ in the $\bi$ setting can be computed in $O\left(\treesize(\etree) + \frac{\log{\frac{1}{\conf}}\cdot \abs{\etree}^2(1,\ldots, 1)}{\error^2\cdot\rpoly^2(\prob_1,\ldots, \prob_\numvar)}\right)$, with multiplicative $(\error,\delta)$-bounds, where $k$ denotes the degree of $\poly$.
\end{Theorem}
\begin{proof}[Proof of Theorem ~\ref{theorem:bi-approx-rpoly-bound}]
By the proof of ~\cref{lem:approx-alg}, with a minor adjustment on $\evalmp$, such that we define the function to output $0$ for any monomial sharing disjoint variables, coupled with the fact that additional operations in ~\cref{alg:bi-mon-sam} are $O(1)$ occurring at most $k$ times for each of the $\numsamp$ samples, the proof of ~\cref{theorem:bi-approx-rpoly-bound} immediately follows.
\end{proof}
\qed
\subsection{Safe Query Class for $\bi$}
We want to analyze what is the class of queries and data restrictions that are necessary to guarantee that $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\prob_{1},\ldots, \prob_{\numvar})}$ is $O(1)$.
\subsubsection{When $\rpoly$ is zero}
First, consider the case when $\rpoly$ cancels out all terms in $\poly$, where $\poly \neq \emptyset$. For $\rpoly$ to cancel out a tuple $\tup$, by ~\cref{def:bi-alg-rpoly} it must be the case that output tuple $\tup$ is dependent on two different tuples appearing in the same block. For this condition to occur, it must be that the query $\poly$ contains a self join operation on a table $\rel$, from which $\tup$ has been derived.
Certain conditions on both the data and query must exist for all tuples $\tup$ to be cancelled out by $\rpoly$ as described above.
For $\rpoly$ to be $0$, the data of a $\bi$ must satisfy certain conditions.
\begin{Definition}[Data Restrictions]\label{def:bi-qtilde-data}
Consider $\bi$ table $\rel$. For $\rpoly$ to potentially cancel all its terms, $\rel$ must be such that given a self join, the join constraints remain unsatisfied for all tuple combinations $x_{\block_i, \ell} \times x_{\block_j, \ell'}$ for $i \neq j$, $\ell \in [\abs{\block_i}], \ell' \in [\abs{\block_j}]$, i.e. combinations across different blocks. Note that this is trivially satisfied with a $\rel$ composed of just one block. Further, it must be the case that the self join constraint is only satisfied in one or more crossterm combinations $x_{\block, i} \times x_{\block_j}$ for $i \neq j$, i.e., within the same block of the input data.
\end{Definition}
To be precise, only equijoins are considered in the following definition. Before preceding, note that a natural self join will never result in $\rpoly$ cancelling all terms, since it is the case that each tuple will necessarily join with itself, and $\rpoly$ will not mod out this case. Also, although we are using the term self join, we consider cases such that query operations over $\rel$ might be performed on each join input prior to the join operation. While technically the inputs may not be the same set of tuples, this case must be considered, since all the tuples originate from the table $\rel$. To this end, let $\poly_1(\rel) = S_1$ and $\poly_2(\rel) = S_2$ be the input tables to the join operation.
\begin{Definition}[Class of Cancelling Queries]\label{def:bi-qtilde-query-class}
When ~\cref{def:bi-qtilde-data} is satisfied, it must be that $\poly$ contains a join $S_1 \bowtie_\theta S_2$ such that either% that satisfies the following constraints based on its structure.
\textsc{Case 1:} $S_1 \cap S_2 = \emptyset$
%Any join over this structure will produce a $\poly$ such that $\rpoly$ cancels all monomials out.
%Such a condition implies $\rpoly$ is $0$ regardless of join condition $\theta$. Note the beginning premise of this definition, and the fact that such premise rules out the natural join across all attributes, since we would have that $\poly = \rpoly = 0$.
Or
\textsc{Case 2:} $S_1 \cap S_2 \neq \emptyset$, the attributes in the join predicate are non-matching, i.e., neither operand of the comparison is a strict subset of the other, and no input tuple has agreeing values across the join attributes.
%\begin{enumerate}
% \item When the join condition $\theta$ involves equality between matching attributes, it must be that the attributes of the join conditon $\attr{\theta}$ are a strict subset of $\attr{\rel}$. Then, to satisfy ~\cref{def:bi-qtilde-data} it must be that the join input consists of non-intersecting strict subsets of $\rel$, meaning $S_1 \cap S_2 = \emptyset$ and $S_1, S_2 \neq \emptyset$. $\poly_1$ in ~\cref{ex:bi-tildeq-0} illustrates this condition.
% \item If $\theta$ involves an equality on non-matching attributes, there exist two cases.
% \begin{enumerate}
% \item The first case consists of when the join inputs intersect, i.e., $S_1 \cap S_2 \neq \emptyset$ . To satisfy ~\cref{def:bi-qtilde-data} it must be the case that no tuple can exist with agreeing values across all attributes in $\attr{\theta}$. $\poly_3$ of ~\cref{ex:bi-tildeq-0} demonstrates this condition.
% \item The second case consists of when $S_1 \cap S_2 = \emptyset$ and $S_1, S_2 \neq \emptyset$ in the join input, and this case does not contradict the requirements of ~\cref{def:bi-qtilde-query-class}. This case is illustrated in $\poly_2$ of ~\cref{ex:bi-tildeq-0}.
% \end{enumerate}
%\end{enumerate}% , cause $\rpoly$ to be $0$ must have the following characteristics. First, there must be a self join. Second, prior to the self join, there must be operations that produce non-intersecting sets of tuples for each block in $\bi$ as input to the self join operation.
\end{Definition}
In ~\cref{ex:bi-tildeq-0}, $\poly_1$ and $\poly_2$ are both examples of \textsc{Case 1}, while $\poly_3$ is an example of \textsc{Case 2}.
\begin{Theorem}\label{theorem:bi-safe-q}
When both ~\cref{def:bi-qtilde-data} and ~\cref{def:bi-qtilde-query-class} are satisfied, $\rpoly$ cancels out all monomials.
\end{Theorem}
\begin{proof}[Proof of Theorem ~\ref{theorem:bi-safe-q}]
Starting with the case that $S_1 \cap S_2 = \emptyset$. When this is the case, by definition, all joins on tuples in $S_1$ and $S_2$ will involve elements in $S_1 \times S_2$ such that both tuples are distinct. Further, ~\cref{def:bi-qtilde-data} rules out joins across different blocks, while calling for joins of the above form within the same block. Thus all tuples in the query output are dependent on more than one tuple from the same block, thus implying by ~\cref{def:bi-alg-rpoly} that $\rpoly$ will cancel all monomials.
For the next case where $S_1 \cap S_2 \neq \emptyset$, note that there exists at least one tuple in both $S_1$ and $S_2$ that is the same. Therefore, all equijoins involving matching attributes will produce at least one self joined tuple in the output, breaking the last property of ~\cref{def:bi-qtilde-data}. For the case of equijoins with predicates involving non-matching attribute operands, note that by definition of equijoin, the only case that a tuple shared in both $S_1$ and $S_2$ can join on itself is precisely when that tuple's values agree across all the join attributes in $\theta$. Thus, it is the case that when $S_1 \cap S_2 \neq \emptyset$ and the join predicate involves equality comparison between non-matching attributes such that the values of the non-matching comparison attributes for each tuple in $\{S_1 \cap S_2\}$ do not agree, we have that ~\cref{def:bi-qtilde-data} is not contradicted, and when ~\cref{def:bi-qtilde-data} is fulfilled, it must be the case that $\poly \neq 0$ while $\rpoly = 0$.
This concludes the proof.
\end{proof}
\qed
Note then that the class of queries described in ~\cref{def:bi-qtilde-query-class} belongs to the set of queries containing some form of selection over a self cross product.
%\begin{proof}[Proof of Lemma ~\ref{lem:bi-qtilde-data}]
%\end{proof}
%\begin{proof}[Proof of Lemma ~\ref{lem:bi-qtilde-query-class}]
%\end{proof}
%%%%%%%%%%%%%%%%%%%%%%%
%The condition that causes $\rpoly(\prob_1,\ldots, \prob_\numvar)$ to be $0$ is when all the output tuples in each block cancel each other out. Such occurs when the annotations of each output tuple break the required $\bi$ property that tuples in the same block must be disjoint. This can only occur for the case when a self-join outputs tuples each of which have been joined to another tuple from its block other than itself.
%
%The observation is then the following. In order for such a condition to occur, we must have a query that is a self-join such that the join is on two different sets of atoms for each block. This condition can occur when inner query operations with different constraints on input table $\rel$ produce two non-intersecting sets of tuples and then performs a self join on them, such that the join condition \textit{only} holds for tuples that are members of the same block.
%
%There are two operators that can produce the aforementioned selectivity. First, consider $\sigma$, where two different selection conditions $\theta_1$ and $\theta_2$ over $\rel$ can output sets $S_{\sigma_{\theta_1}}$ and $S_{\sigma_{\theta_2}}$ where $S_{\sigma_{\theta_1}} \cap S_{\sigma_{\theta_2}} = \emptyset$. A join over these two outputs can produce an ouput $\poly$ where all annotations will be disjoint and $\rpoly$ will effectively cancel them all out. Second, consider the projection operator $\pi$, such that projections over $\rel$ which project on different attributes can output two non-intersecting sets of tuples, which when joined, again, provided that the join condition holds only for tuples appearing in the same block, can output tuples all of which will break the disjoint requirement and $\rpoly$ will cancel them out.
\begin{Example}\label{ex:bi-tildeq-0}
Consider the following $\bi$ table $\rel$ consisting of one block, with the following queries $\poly_1 = \sigma_{A = 1}(\rel)\bowtie_{B = B'} \sigma_{A = 2}(\rel)$, $\poly_2 = \sigma_{A = 1}(\rel)\bowtie_{A = B'} \sigma_{A = 2}(\rel)$, and $\poly_3 = \rel \bowtie_{A = B} \rel$. While the output $\poly_i \neq \emptyset$, all queries have that $\rpoly_i = 0$. Since $\rel$ consists of only one block, we will use single indexing over the annotations.
\end{Example}
\begin{figure}[ht]
\begin{tabular}{ c | c c c }
\rel & A & B & $\phi$\\
\hline
& 1 & 2 & $x_1$\\
& 2 & 1 & $x_2$\\
& 1 & 3 & $x_3$\\
& 3 & 1 & $x_4$\\
\end{tabular}
\caption{Example~\ref{ex:bi-tildeq-0} Table $\rel$}
\label{fig:bi-ex-table}
\end{figure}
%%%%%%%%%%Query 1 and 2
\begin{figure}[ht]
\begin{subfigure}{0.2\textwidth}
\centering
\begin{tabular}{ c | c c c }
$\sigma_{\theta_{A = 1}}(\rel )$& A & B & $\phi$\\
\hline
& 1 & 2 & $x_1$\\
& 1 & 3 & $x_3$\\
\end{tabular}
\caption{$\poly_1, \poly_2$ First Selection}
\label{subfig:bi-q1-sigma1}
\end{subfigure}
\begin{subfigure}{0.2\textwidth}
\centering
\begin{tabular}{ c | c c c}
$\sigma_{\theta_{A = 2}}(\rel)$ & A & B' & $\phi$\\
\hline
& 2 & 1 & $x_2$\\
\end{tabular}
\caption{$\poly_1, \poly_2$ Second Selection}
\label{subfig:bi-q1-sigma2}
\end{subfigure}
\begin{subfigure}{0.25\textwidth}
\centering
\begin{tabular}{ c | c c c c c}
$\poly_1(\rel)$ & $A_R$ & $B_R$ & $A_{\rel'}$ & $B_{\rel'}$ & $\phi$\\
\hline
& 1 & 2 & 2 & 1 & $x_1x_2$\\
\end{tabular}
\caption{$\poly_1(\rel)$ Output}
\label{subfig:bi-q1-output}
\end{subfigure}
\begin{subfigure}{0.4\textwidth}
\centering
\begin{tabular}{ c | c c c c c}
$\poly_2(\rel)$ & $A_R$ & $B_R$ & $A_{\rel'}$ & $B_{\rel'}$ & $\phi$\\
\hline
& 1 & 2 & 2 & 1 & $x_1x_2$\\
& 1 & 3 & 2 & 1 & $x_2x_3$\\
\end{tabular}
\caption{$\poly_2(\rel)$ Output}
\label{subfig:bi-q2-output}
\end{subfigure}
\caption{$\poly_1, \poly_2(\rel)$}
\label{fig:bi-q1-q2}
\end{figure}
%%%%%%%%%%%Query 3
\begin{figure}[ht]
% \begin{subfigure}{0.2\textwidth}
% \centering
% \begin{tabular}{ c | c c }
% $\pi_{A}(\rel)$ & A & $\phi$\\
% \hline
% & 1 & $x_1$\\
% & 2 & $x_2$\\
% & 1 & $x_3$\\
% & 3 & $x_4$\\
% \end{tabular}
% \caption{$\poly_3$ First Projection}
% \label{subfig:bi-q3-pi1}
% \end{subfigure}
% \begin{subfigure}{0.2\textwidth}
% \centering
% \begin{tabular}{ c | c c }
% $\pi_{B}(\rel)$ & B & $\phi$\\
% \hline
% & 2 & $x_1$\\
% & 1 & $x_2$\\
% & 3 & $x_3$\\
% & 1 & $x_4$\\
% \end{tabular}
% \caption{$\poly_3$ Second Projection}
% \label{subfig:bi-q3-pi2}
% \end{subfigure}
\begin{subfigure}{0.2\textwidth}
\centering
\begin{tabular}{ c | c c c c c }
$\poly_3(\rel)$ & A & B & $A_{\rel'}$ & $B_{\rel'}$ & $\phi$\\
\hline
& 1 & 2& 2 & 1 & $x_1x_2$\\
& 1 & 2 & 3 & 1 & $x_1x_2$\\
& 2 & 1 & 1 & 2 & $x_1x_2$\\
& 1 & 3 & 2 & 1 & $x_2x_3$\\
& 1 & 3 & 3 & 1 & $x_3x_4$\\
& 3 & 1 & 1 & 3 & $x_3x_4$\\
\end{tabular}
\caption{$\poly_3(\rel)$ Output}
\label{subfig:bi-q3-output}
\end{subfigure}
\caption{$\poly_3(\rel)$}
\label{fig:bi-q3}
\end{figure}
Note that all of ~\cref{subfig:bi-q1-output}, ~\cref{subfig:bi-q2-output}, and ~\cref{subfig:bi-q3-output} each have a set of tuples, where each annotation has cross terms from its block, and by ~\cref{def:bi-alg-rpoly} $\rpoly$ will eliminate all tuples output in the respective queries.
\subsubsection{When $\rpoly > 0$}
\par\AH{General Case and Sufficient Condition for $\bi$ and $\rpoly_{\bi}$ approx alg needs to be written.}
\paragraph{General Case}
Consider the query $\poly = \sum_{i = 1}^{\numvar}x_i$, analogous to a projection where all tuples match on the projected set of attributes, meaning $\tup_i[A] = \tup_j[A]$ for $i, j \in [\numvar]$ such that $i \neq j$. When $\numvar$ grows unboundedly, $\abs{\etree}(1,\ldots, 1) = \numvar$. We assume that the sum of the probabilities of all $\numvar$ tuples in the block remains a constant as $\numvar$ grows. Thus, we have that $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\vct{\prob})} = \frac{\numvar}{c}$ for some constant $c$, and this implies $O(\numvar)$ growth.
% while $\rpoly(\vct{\prob}) \leq 1$, which implies that the ratio is linear, i.e., $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\vct{p})} = \frac{\numvar}{\numvar \cdot \prob_0} = \frac{1}{\prob_0}$ for $\prob_0 = min(\vct{\prob})$. However, note that for $\numvar \rightarrow \infty$ it is the case that $\prob_0 \rightarrow 0$, and as $\numvar$ grows, so does $\frac{1}{\prob_0}$. Intuitively, consider when $p_0 = \frac{1}{\numvar}$. Then we know that the bound is $\frac{\numvar}{1}$ which is $O(\numvar)$.
\paragraph{Sufficient Condition for $\bi$ to achieve linear approximation}
Consider the same query $\poly = \sum_{i = 1}^{\numvar}x_i$, but this time conditioned on a fixed block size which we denote $\abs{\block}$. Then it is the case that $\abs{\etree}(1,\ldots, 1) = \numvar$, but if we assume that all blocks have a sum of probabilities equal to $1$, $\rpoly(\vct{\prob}) = \frac{\numvar}{\abs{\block}}$, and this means that $\frac{\abs{\etree}(1,\ldots, 1)}{\rpoly(\vct{\prob})} = \frac{\numvar}{\frac{\numvar}{\abs{\block}}} = \abs{\block}$. For the general case when all blocks do not have the property that the sum of the probabilities of the alternatives equals $1$, we can lower bound the sum of probabilities by $\frac{\numvar}{\abs{\block}} \cdot \prob_0$ for $\prob_0 = \min(\vct{\prob})$. Note that in $\numvar \cdot \frac{\prob_0}{\abs{\block}}$, $\frac{\prob_0}{\abs{\block}}$ is indeed a constant, and this gives an overall ratio of $O(1)$ as $\numvar$ increases.