% paper-BagRelationalPDBsAreHard/poly-form.tex
%
% 119 lines
% 10 KiB
% TeX
%
%root: main.tex
\section{Polynomial Formulation}
Let $\vect_1,\ldots, \vect_\numTup$ be vectors annotating $\numTup$ tuples in a TIDB such that \begin{equation}\vect_i[(\wbit_1,\ldots,\wbit_\numTup)] =
\begin{cases} 1 &\wbit_i = 1,\\
0 &\text{otherwise}.\label{eq:vec-def}
\end{cases}
\end{equation}
Here we define vector indexing by the $\numTup$-bit binary tuple $\wVec = (\wbit_1,\ldots,\wbit_\numTup)$ such that the possible world $\wVec$ is identified by its bit vector binary value.
%---We have chosen to ignore the vector formulation
%Futher we define the polynomial $\poly(\vect_1,\ldots,\vect_\numTup)$ as an arbitrary polynomial defined over the input vectors, whose addition and multiplication operations are defined as the traditional point-wise vector addition and multiplication. We overload notation and denote the $i^{th}$ world of $\poly$ as $\poly[\wVec]$, where $\poly$ can be viewed as the output annotation vector, and the L-1 norm can be represented as
%
%\[\norm{\poly}_1 = \sum\limits_{\wVec \in \wSet} \poly[\wVec].\]<---technically incorrect when we consider negative values in \poly
%------
Define $\poly(X_1,\ldots, X_\numTup)$ as a polynomial whose variables represent the tuple annotations of an arbitrary query. While recent research has benefited in viewing the possible worlds problem as a vector $\vect$, notice that \cref{eq:vec-def} is equivalent to $\vect_i[\wVec] = \wbit_i$. With this observation we can further reformulate this problem by viewing $\poly$ as a polynomial over bit values rather than one over vectors, where each element of the input is a bit element of the $\wVec$ bit vector, and we can thus replace each variable of $\poly$ with its corresponding input bit, and solve for the particular world $\wVec$. The output we desire is
\[\expct_{\wVec}\pbox{\poly(\wVec)} = \sum\limits_{\wVec \in \{0, 1\}^\numTup} \poly(\wVec)\prod_{\substack{i \in [\numTup]\\ \text{s.t. } \wbit_i = 1}}\prob_i \prod_{\substack{i \in [\numTup]\\ \text{s.t. } \wbit_i = 0}}\left(1 - \prob_i\right).\]
Further, define $\rpoly(X_1,\ldots, X_\numTup)$ as the reduced version of $\poly(X_1,\ldots, X_\numTup)$, of the form
\[\rpoly(\wbit_1,\ldots, \wbit_\numTup) = \poly(\wbit_1,\ldots, \wbit_\numTup) \mod \left(\wbit_1^2-\wbit_1\right)\cdots\mod \left(\wbit_\numTup^2 - \wbit_\numTup\right).\] Intuitively, $\rpoly(\wVec)$ is the expanded sum of products form of $\poly(\wVec)$ such that if any $\wbit_j$ term has an exponent $e > 1$, its exponent is reduced to $1$, i.e. $\wbit_j^e\mapsto \wbit_j$ for any $e > 1$. The usefulness of this reduction will be seen shortly.
First, note the following fact:
\[\text{For all } (\wbit_1,\ldots, \wbit_\numTup) \in \{0, 1\}^\numTup, \poly(\wbit_1,\ldots, \wbit_\numTup) = \rpoly(\wbit_1,\ldots, \wbit_\numTup).\]
\begin{proof}
For all $b \in \{0, 1\}$ and all $e \geq 1$, $b^e = b$.\qed
\end{proof}
Assuming each tuple has a probability $\prob = \frac{1}{2}$, we note that
\begin{Property}\label{prop:l1-rpoly-numTup}
The L-1 norm of $\poly$ is equal to $\rpoly(\frac{1}{2},\ldots, \frac{1}{2})$ times the number of possible worlds, $|\wSet| = 2^\numTup$.
\begin{equation*}
\norm{\poly}_1 = \sum_{(\wbit_1,\ldots, \wbit_\numTup) \in \{0, 1\}^\numTup} \poly(\wbit_1,\ldots, \wbit_\numTup) = 2^\numTup \cdot \rpoly(\frac{1}{2},\ldots, \frac{1}{2}).
\end{equation*}
\end{Property}
\begin{proof}
Using the fact above, we need to compute \[\sum_{(\wbit_1,\ldots, \wbit_\numTup) \in \{0, 1\}^\numTup}\rpoly(\wbit_1,\ldots, \wbit_\numTup).\] We therefore argue that
\[\sum_{(\wbit_1,\ldots, \wbit_\numTup) \in \{0, 1\}^\numTup}\rpoly(\wbit_1,\ldots, \wbit_\numTup) = 2^\numTup \cdot \rpoly(\frac{1}{2},\ldots, \frac{1}{2}).\]
Note that for any single monomial, this is indeed the case since the variables in a single monomial are independent and their joint probability equals the product of the probabilities of each variable in the monomial, i.e., for monomial $M$, $\prob[M] = \prod_{x_i \in M}\prob[x_i].$ This is equivalent to the sum of the probabilities of all worlds where each variable in $M$ is a $1$. Since $1$ is the identity element, it is also the case that $\prod_{x_i \in M}\prob[x_i] = \ex{M}$. (Note that all other terms in the expectation will not contribute since $M$ will equal $0$, and a product containing a factor of $0$ always equals $0$.) It follows then that $\ex{M} = M(\frac{1}{2},\ldots, \frac{1}{2})$, i.e., the monomial evaluated with each variable set to $\frac{1}{2}$. Next, observe that the value $2^\numTup$ is the number of worlds, and finally, that the product of the expectation and the total number of worlds yields the exact sum.
The final result follows by the fact that $\rpoly$ is a sum of monomials, and we can, by linearity of expectation, equivalently push the expectation through the sum and into the monomials.\qed
\end{proof}
\begin{Property}\label{prop:exp-rpoly}
For the case of general $\prob$, where each tuple in the TIDB is present with probability $\prob$, the expectation of polynomial $\poly$ is equal to $\rpoly(\prob,\ldots, \prob).$
\end{Property}
\begin{proof}
Note that $\poly$ has an equivalent sum of products form such that $\poly$ is the sum of monomials. By linearity of expectation, the expectation of $\poly$ is equivalent to the sum of the expectations of each monomial in $\poly$.
Note further, that for the general monomial, the only operation is product. In the binary (TIDB) setting, if any of the variables in the monomial are zero, then the whole monomial becomes zero. Note that this case contributes nothing to the expectation. Notice also, that if the variables are all one, then their product is one, and the product of the identity element with the product of probabilities is the product of probabilities. This is the only condition which contributes to the expectation. It is therefore necessary to know in which worlds the variables in the monomial are all equal to one, and the expectation then is the sum of the probabilities of each world for which the monomial's variables are all equal to one. This sum of probabilities is also known as the marginal probability, and by independence it equals the product of the probabilities of the variables in the monomial. It then stands that the general monomial's expectation is equal to the product of its variables' probabilities. The sum of all such expectations is exactly the definition of $\rpoly(\prob,\ldots, \prob)$. Let $M_i$ be a monomial, $t$ be the number of monomials in $\poly$, and $x_1\cdots x_v$ represent the product of the variable(s) in $M_i$. Then
\begin{equation*}
\ex{M_1 + \cdots + M_t} = \ex{M_1} +\cdots+\ex{M_t}
\end{equation*}
For any $M_i$,
\begin{align*}
\ex{M_i} &= \ex{x_1\cdots x_v} = \sum_{(\wElem_1,\ldots, \wElem_\numTup) \in \{0, 1\}^\numTup} x_1\cdots x_v \cdot \prod_{\substack{i' \in [\numTup]\\ \text{s.t. } \wElem_{i'} = 1}}\prob_{i'} \prod_{\substack{i' \in [\numTup]\\ \text{s.t. } \wElem_{i'} = 0}}\left(1 - \prob_{i'}\right)\\
&= \sum_{\substack{(\wElem_1,\ldots, \wElem_\numTup) \in \{0, 1\}^\numTup\\ \text{s.t. } \forall i' \in \{j \mid x_j \in M_i\}:\; \wElem_{i'} = 1}} \prod_{i' \in \{j \mid x_j \in M_i\}} \prob_{i'} \prod_{\substack{i'' \notin \{j \mid x_j \in M_i\}\\ \text{s.t. } \wElem_{i''} = 1}}\prob_{i''}\prod_{\substack{i'' \notin \{j \mid x_j \in M_i\}\\ \text{s.t. } \wElem_{i''} = 0}} \left(1 - \prob_{i''}\right)\\
&= \prod_{i' \in \{j \mid x_j \in M_i\}}\prob_{i'}\\
\implies \ex{M_1} +\cdots+\ex{M_t} &= \prod_{i_1 \in \{j \mid x_j \in M_1\}}\prob_{i_1} +\cdots+\prod_{i_t \in \{j \mid x_j \in M_t\}} \prob_{i_t}\\
&= \rpoly(\prob_1,\ldots, \prob_\numTup).\qed
\end{align*}
\AH{I just realized, that I could have saved a lot of time by noting that for the case of TIDB, all monomial variables in $M_i$ are independent, and then using linearity of expectation to conclude the proof.}
\end{proof}
\begin{Corollary}
If $\poly$ is given to us in a sum of monomials form, the expectation of $\poly$ ($\ex{\poly}$) can be computed in $O(|\poly|)$, where $|\poly|$ denotes the total number of multiplication/addition operators.
\end{Corollary}
The corollary follows by \cref{prop:l1-rpoly-numTup} and \cref{prop:exp-rpoly}, and by the fact that the total number of operations in sum of monomials form is exactly the number of addition/multiplication operations.
\subsection{When $\poly$ is not in sum of monomials form}
We would like to argue that in the general case the expectation cannot be computed in linear time.
To this end, consider the following graph $G(V, E)$, where $|E| = m$, $|V| = \numTup$, and $i, j \in [\numTup]$. Consider the query $q_E(\wElem_1,\ldots, \wElem_\numTup) = \sum\limits_{(i, j) \in E} \wElem_i \cdot \wElem_j$.
\begin{Lemma}
If we can compute $\poly(\wElem_1,\ldots, \wElem_\numTup) = q_E(\wElem_1,\ldots, \wElem_\numTup)^3$ in $T(m)$ time for fixed $\prob$, then we can count the number of triangles in $G$ in $T(m) + O(m)$ time.
\end{Lemma}
\begin{Lemma}
If we can compute $\poly(\wElem_1,\ldots, \wElem_\numTup) = q_E(\wElem_1,\ldots, \wElem_\numTup)^3$ in $T(m)$ time for $O(1)$ distinct values of $\prob$, then we can count the number of triangles (and the number of 3-paths, and the number of 3-matchings) in $G$ in $O(T(m) + m)$ time.
\end{Lemma}
\begin{proof}
First, let us do a warm-up by computing $\rpoly(\wElem_1,\dots, \wElem_\numTup)$ when $\poly = q_E(\wElem_1,\ldots, \wElem_\numTup)$. Before doing so, we introduce some notation. Let $\numocc{H}$ denote the number of occurrences of $H$ in $G$. So, e.g., $\numocc{\ed}$ is the number of edges ($m$) in $G$.
\begin{Claim}
\begin{enumerate}
\item $\rpoly_2(\prob,\ldots, \prob) = \numocc{\ed} \cdot \prob^2 + 2\cdot \numocc{\twopath}\cdot \prob^3 + 2\cdot \numocc{\twodis}\cdot \prob^4$
\item We can compute $\rpoly_2$ in O(m) time.
\end{enumerate}
\begin{proof}
The proof basically follows by definition.
\begin{enumerate}
\item First note that
\begin{align*}
\poly_2(\wElem_1,\ldots, \wElem_\numTup) &= \sum_{(i, j) \in E} (\wElem_i\wElem_j)^2 + \sum_{\substack{(i, j), (k, \ell) \in E\\ \text{s.t. } (i, j) \neq (k, \ell)}} \wElem_i\wElem_j\wElem_k\wElem_\ell\\
&= \sum_{(i, j) \in E} (\wElem_i\wElem_j)^2 + \sum_{\substack{(i, j), (j, \ell) \in E\\ \text{s.t. } i \neq \ell}}\wElem_i\wElem_j^2\wElem_\ell + \sum_{\substack{(i, j), (k, \ell) \in E\\ \text{s.t. } i \neq j \neq k \neq \ell}} \wElem_i\wElem_j\wElem_k\wElem_\ell
\end{align*}
By definition,
\begin{equation*}
\rpoly_2(\wElem_1,\ldots, \wElem_\numTup) = \sum_{(i, j) \in E} \wElem_i\wElem_j + \sum_{\substack{(i, j), (j, \ell) \in E\\ \text{s.t. } i \neq \ell}}\wElem_i\wElem_j\wElem_\ell + \sum_{\substack{(i, j), (k, \ell) \in E\\ \text{s.t. } i \neq j \neq k \neq \ell}} \wElem_i\wElem_j\wElem_k\wElem_\ell
\end{equation*}
Notice that the first term is $\numocc{\ed}\cdot \prob^2$, the second $2\cdot\numocc{\twopath}\cdot \prob^3$ (each 2-path is counted once per ordering of its two edges), and the third $2\cdot\numocc{\twodis}\cdot \prob^4$, matching the claim.
\item To compute $\rpoly_2(\prob,\ldots,\prob)$ in $O(m)$ time, note that $\numocc{\ed} = m$ is given, that $\numocc{\twopath} = \sum_{v \in V} \binom{d_v}{2}$ where $d_v$ is the degree of vertex $v$ (computable with one $O(m)$ pass over the edge list), and that $\numocc{\twodis} = \binom{m}{2} - \numocc{\twopath}$, since every pair of distinct edges either shares a vertex or is disjoint. Hence each count, and thus the sum in part (1), is computable in $O(m)$ time.
\end{enumerate}
\end{proof}
\end{Claim}
\end{proof}