Probability macros for the appendix

This commit is contained in:
Aaron Huber 2020-12-20 00:56:46 -05:00
parent 196e23db8e
commit cb847ede46
2 changed files with 27 additions and 28 deletions

View file

@ -5,7 +5,6 @@
\subsection{Supplementary Material for~\Cref{prop:expection-of-polynom}}\label{subsec:supp-mat-background}
To justify the use of $\semNX$-databases, we need to show that we can encode any $\semN$-PDB in this way and that the query semantics over this representation coincides with query semantics over $\semN$-PDB. For that it will be opportune to define representation systems for $\semN$-PDBs.\BG{cite}
Before we proceed, unless otherwise mentioned, all subsequent proofs for~\Cref{sec:background} can be found in~\Cref{sec:proofs-background}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{Definition}[Representation System]\label{def:representation-syste}
@ -29,12 +28,12 @@ Formally, an $\semNX$-PDB is an $\semNX$-database $\db$ and a probability distri
An $\semNX$-PDB $\pxdb$ over variables $\vct{X} = \{X_1, \ldots, X_n\}$ is a tuple $(\db,\pd)$ where $\db$ is an $\semNX$-database and $\pd$ is a probability distribution over $\vct{w} \in \{0,1\}^n$. We use $\assign_{\vct{w}}$ to denote the assignment corresponding to $\vct{w} \in \{0,1\}^n$. The $\semN$-PDB $\rmod(\pxdb) = (\idb, \pd')$ encoded by $\pxdb$ is defined as:
\begin{align*}
\idb & = \{ \assign_{\vct{w}}(\pxdb) \mid \vct{w} \in \{0,1\}^n \} \\
\pd'(\db) & = \sum_{\vct{w} \in \{0,1\}^n: \assign_{\vct{w}}(\pxdb) = \db} \pd(\vct{w})
\forall \db \in \idb: \probOf'(\db) & = \sum_{\vct{w} \in \{0,1\}^n: \assign_{\vct{w}}(\pxdb) = \db} \probOf(\vct{w})
\end{align*}
\end{Definition}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
For instance, consider a $\pxdb$ consisting of a single tuple $\tup_1 = (1)$ annotated with $X_1 + X_2$ with probability distribution $\pd([0,0]) = 0$, $\pd([0,1]) = 0$, $\pd([1,0]) = 0.3$ and $\pd([1,1]) = 0.7$. This $\semNX$-PDB encodes two possible worlds (with non-zero) probability that we denote using their world vectors.
For instance, consider a $\pxdb$ consisting of a single tuple $\tup_1 = (1)$ annotated with $X_1 + X_2$ with probability distribution $\probOf([0,0]) = 0$, $\probOf([0,1]) = 0$, $\probOf([1,0]) = 0.3$ and $\probOf([1,1]) = 0.7$. This $\semNX$-PDB encodes two possible worlds (with non-zero probability) that we denote using their world vectors.
%
\[
D_{[0,1]}(\tup_1) = 1 \hspace{0.3cm} \mathbf{and} \hspace{0.3cm} D_{[1,1]}(\tup_1) = 2
@ -68,7 +67,7 @@ The closure under $\raPlus$ queries follows from the fact that an assignment $\v
Now let us consider computing the expected multiplicity of a tuple $\tup$ in the result of a query $\query$ over an $\semN$-PDB $\pdb$ using the annotation of $\tup$ in the result of evaluating $\query$ over an $\semNX$-PDB $\pxdb$ for which $\rmod(\pxdb) = \pdb$. The expectation of the polynomial $\poly = \query(\pxdb)(\tup)$ based on the probability distribution of $\pxdb$ over the variables in $\pxdb$ is:
\begin{equation}
\expct_{\vct{W} \sim \pd}\pbox{\poly(\vct{W})} = \sum_{\vct{w} \in \{0,1\}^n} \query(\assign_{\vct{w}}(\pxdb))(\tup) \cdot \pd(\vct{w})\label{eq:expect-q-nx}
\expct_{\vct{W} \sim \pd}\pbox{\poly(\vct{W})} = \sum_{\vct{w} \in \{0,1\}^n} \query(\assign_{\vct{w}}(\pxdb))(\tup) \cdot \probOf(\vct{w})\label{eq:expect-q-nx}
\end{equation}
Since $\semNX$-PDBs $\pxdb$ are a complete representation system for $\semN$-PDBs which are closed under $\raPlus$, computing the expectation of the multiplicity of a tuple $t$ in the result of an $\raPlus$ query over the $\semN$-PDB $\rmod(\pxdb)$, is the same as computing the expectation of the polynomial $\query(\pxdb)(t)$.
@ -77,22 +76,22 @@ Since $\semNX$-PDBs $\pxdb$ are a complete representation system for $\semN$-PDB
\subsection{Proof of~\Cref{prop:expection-of-polynom}}
\label{subsec:expectation-of-polynom-proof}
We need to prove for $\semN$-PDB $\pdb = (\idb,\pd)$ and $\semNX$-PDB $\pxdb = (\db',\pd')$ where $\rmod(\pxdb) = \pdb$ that $\expct_{\db \sim \pd}[\query(\db)(t)] = \expct_{\vct{w} \sim \pd'}\pbox{\polyForTuple(\vct{w})}$
We need to prove for $\semN$-PDB $\pdb = (\idb,\pd)$ and $\semNX$-PDB $\pxdb = (\db',\pd')$ where $\rmod(\pxdb) = \pdb$ that $\expct_{\db \sim \pd}[\query(\db)(t)] = \expct_{\vct{W} \sim \pd'}\pbox{\polyForTuple(\vct{W})}$
By expanding $\polyForTuple$ and the expectation we have:
\begin{align*}
\expct_{\vct{w} \sim \pd'}\pbox{\polyForTuple(\vct{w})}
& = \sum_{\vct{w} \in \{0,1\}^n}\pd'(\vct{w}) \cdot Q(\pxdb)(t)(\vct{w})\\
\expct_{\vct{W} \sim \pd'}\pbox{\polyForTuple(\vct{W})}
& = \sum_{\vct{w} \in \{0,1\}^n}\probOf'(\vct{w}) \cdot Q(\pxdb)(t)(\vct{w})\\
\intertext{From $\rmod(\pxdb) = \pdb$, we have that the range of $\assign_{\vct{w}}(\pxdb)$ is $\idb$, so}
& = \sum_{\db \in \idb}\;\;\sum_{\vct{w} \in \{0,1\}^n : \assign_{\vct{w}}(\pxdb) = \db}\pd'(\vct{w}) \cdot Q(\pxdb)(t)(\vct{w})\\
& = \sum_{\db \in \idb}\;\;\sum_{\vct{w} \in \{0,1\}^n : \assign_{\vct{w}}(\pxdb) = \db}\probOf'(\vct{w}) \cdot Q(\pxdb)(t)(\vct{w})\\
\intertext{In the inner sum, $\assign_{\vct{w}}(\pxdb) = \db$, so by distributivity of $+$ over $\times$}
& = \sum_{\db \in \idb}\query(\db)(t)\sum_{\vct{w} \in \{0,1\}^n : \assign_{\vct{w}}(\pxdb) = \db}\pd'(\vct{w})\\
\intertext{From the definition of $P$, given $\rmod(\pxdb) = \pdb$, we get}
& = \sum_{\db \in \idb}\query(\db)(t) \cdot \pd(D) \quad = \expct_{\db \sim \pd}[\query(\db)(t)]
& = \sum_{\db \in \idb}\query(\db)(t)\sum_{\vct{w} \in \{0,1\}^n : \assign_{\vct{w}}(\pxdb) = \db}\probOf'(\vct{w})\\
\intertext{From the definition of $\probOf$, given $\rmod(\pxdb) = \pdb$, we get}
& = \sum_{\db \in \idb}\query(\db)(t) \cdot \probOf(\db) \quad = \expct_{\db \sim \pd}[\query(\db)(t)]
\end{align*}
\subsection{Supplementary Material for~\Cref{subsec:tidbs-and-bidbs}}\label{subsec:supp-mat-ti-bi-def}
Two important subclasses of $\semNX$-PDBs that are of interest to us are the bag versions of tuple-independent databases (\tis) and block-independent databases (\bis). Under set semantics, a \ti is a deterministic database $\db$ where each tuple $\tup$ is assigned a probability $\prob(\tup)$. The set of possible worlds represented by a \ti $\db$ is all subsets of $\db$. The probability of each world is the product of the probabilities of all tuples that exist with one minus the probability of all tuples of $\db$ that are not part of this world, i.e., tuples are treated as independent random events. In a \bi, we also assign each tuple a probability, but additionally partition $\db$ into blocks. The possible worlds of a \bi $\db$ are all subsets of $\db$ that contain at most one tuple from each block. Note then that the tuples sharing the same block are disjoint, and the sum of the probabilitites of all the tuples in the same block $\block$ is $1$. The probability of such a world is the product of the probabilities of all tuples present in the world. %and one minus the sum of the probabilities of all tuples from blocks for which no tuple is present in the world.
Two important subclasses of $\semNX$-PDBs that are of interest to us are the bag versions of tuple-independent databases (\tis) and block-independent databases (\bis). Under set semantics, a \ti is a deterministic database $\db$ where each tuple $\tup$ is assigned a probability $\prob_\tup$. The set of possible worlds represented by a \ti $\db$ is all subsets of $\db$. The probability of each world is the product of the probabilities of all tuples that exist with one minus the probability of all tuples of $\db$ that are not part of this world, i.e., tuples are treated as independent random events. In a \bi, we also assign each tuple a probability, but additionally partition $\db$ into blocks. The possible worlds of a \bi $\db$ are all subsets of $\db$ that contain at most one tuple from each block. Note then that the tuples sharing the same block are disjoint, and the sum of the probabilities of all the tuples in the same block $\block$ is $1$. The probability of such a world is the product of the probabilities of all tuples present in the world. %and one minus the sum of the probabilities of all tuples from blocks for which no tuple is present in the world.
For bag \tis and \bis, we define the probability of a tuple to be the probability that the tuple exists with multiplicity at least $1$.
\AH{This part \emph{below} needs more work if we include it.}
@ -100,7 +99,7 @@ Note that the main difference to the standard definitions of \tis and \bis is th
%
\begin{align}\label{eq:tidb-expectation}
\expct_{\vct{X} \sim \pd^{(\vct{p})}}\pbox{\poly(\vct{X})} = \sum\limits_{\vct{w} \in \{0, 1\}^\numvar} \poly(\vct{w})\prod_{\substack{i \in [\numvar]\\ s.t. \wElem_i = 1}}\prob_i \prod_{\substack{i \in [\numvar]\\s.t. w_i = 0}}\left(1 - \prob_i\right).
\expct_{\vct{W} \sim \pd^{(\vct{p})}}\pbox{\poly(\vct{W})} = \sum\limits_{\vct{w} \in \{0, 1\}^\numvar} \poly(\vct{w})\prod_{\substack{i \in [\numvar]\\ s.t. \wElem_i = 1}}\prob_i \prod_{\substack{i \in [\numvar]\\s.t. w_i = 0}}\left(1 - \prob_i\right).
\end{align}
%
\BG{Do we need the BIDB formula?}
@ -143,9 +142,9 @@ Let $\poly$ be the generalized polynomial, i.e., the polynomial of $\numvar$ var
\[\poly(X_1,\ldots, X_\numvar) = \sum_{\vct{d} \in \{0,\ldots, B\}^\numvar}q_{\vct{d}}\cdot \prod_{\substack{i = 1\\s.t. d_i \geq 1}}^\numvar X_i^{d_i}.\]
Then, assigning $\vct{w}$ to $\vct{X}$, for expectation we have
Then, in expectation we have
\begin{align}
\expct_{\vct{w}}\pbox{\poly(\vct{w})} &= \sum_{\vct{d} \in \eta}q_{\vct{d}}\cdot \expct_{\vct{w}}\pbox{\prod_{\substack{i = 1\\s.t. d_i \geq 1}}^\numvar w_i^{d_i}}\label{p1-s1}\\
\expct_{\vct{W}}\pbox{\poly(\vct{W})} &= \sum_{\vct{d} \in \eta}q_{\vct{d}}\cdot \expct_{\vct{w}}\pbox{\prod_{\substack{i = 1\\s.t. d_i \geq 1}}^\numvar w_i^{d_i}}\label{p1-s1}\\
&= \sum_{\vct{d} \in \eta}q_{\vct{d}}\cdot \prod_{\substack{i = 1\\s.t. d_i \geq 1}}^\numvar \expct_{\vct{w}}\pbox{w_i^{d_i}}\label{p1-s2}\\
&= \sum_{\vct{d} \in \eta}q_{\vct{d}}\cdot \prod_{\substack{i = 1\\s.t. d_i \geq 1}}^\numvar \expct_{\vct{w}}\pbox{w_i}\label{p1-s3}\\
&= \sum_{\vct{d} \in \eta}q_{\vct{d}}\cdot \prod_{\substack{i = 1\\s.t. d_i \geq 1}}^\numvar \prob_i\label{p1-s4}\\
@ -381,7 +380,7 @@ The number of triangles in $\graph{\ell}$ for $\ell \geq 2$ will always be $0$ f
Before proving~\Cref{lem:mon-samp}, we use it to argue our main result,~\Cref{lem:approx-alg}:
\subsection{Proof of Theorem \ref{lem:approx-alg}}
Set $\mathcal{E}=\approxq(\etree, (p_1,\dots,p_\numvar),$ $\conf, \error')$, where
Set $\mathcal{E}=\approxq(\etree, (\prob_1,\dots,\prob_\numvar),$ $\conf, \error')$, where
\[\error' = \error \cdot \frac{\rpoly(\prob_1,\ldots, \prob_\numvar)\cdot (1 - \gamma)}{\abs{\etree}(1,\ldots, 1)},\]
which achieves the claimed accuracy bound on $\mathcal{E}$.
@ -413,13 +412,13 @@ Let $\empmean = \frac{1}{\samplesize}\sum_{i = 1}^{\samplesize}\randvar_i$. It
Hoeffding's inequality states that if we know that each $\randvar_i$ (which are all independent) always lie in the intervals $[a_i, b_i]$, then it is true that
\begin{equation*}
P\left(\left|\empmean - \expct\pbox{\empmean}\right| \geq \error\right) \leq 2\exp{\left(-\frac{2\samplesize^2\error^2}{\sum_{i = 1}^{\samplesize}(b_i -a_i)^2}\right)}.
\probOf\left(\left|\empmean - \expct\pbox{\empmean}\right| \geq \error\right) \leq 2\exp{\left(-\frac{2\samplesize^2\error^2}{\sum_{i = 1}^{\samplesize}(b_i -a_i)^2}\right)}.
\end{equation*}
Line ~\ref{alg:mon-sam-sample} shows that $\vari{sgn}_\vari{i}$ has a value in $\{-1, 1\}$ that is multiplied with $O(k)$ $p_i\in [0, 1]$, the range for each $\randvar_i$ is $[-1, 1]$.
Line~\ref{alg:mon-sam-sample} shows that $\vari{sgn}_\vari{i}$ has a value in $\{-1, 1\}$ that is multiplied with $O(k)$ values $\prob_i\in [0, 1]$; thus, the range for each $\randvar_i$ is $[-1, 1]$.
Using Hoeffding's inequality, we then get:
\begin{equation*}
P\pbox{~\left| \empmean - \expct\pbox{\empmean} ~\right| \geq \error} \leq 2\exp{\left(-\frac{2\samplesize^2\error^2}{2^2 \samplesize}\right)} = 2\exp{\left(-\frac{\samplesize\error^2}{2 }\right)}\leq \conf,
\probOf\pbox{~\left| \empmean - \expct\pbox{\empmean} ~\right| \geq \error} \leq 2\exp{\left(-\frac{2\samplesize^2\error^2}{2^2 \samplesize}\right)} = 2\exp{\left(-\frac{\samplesize\error^2}{2 }\right)}\leq \conf,
\end{equation*}
where the last inequality follows from our choice of $\samplesize$ in~\Cref{alg:mon-sam-global2}.
@ -433,9 +432,9 @@ The result follows by first noting that by definition of $\gamma$, we have
%\AH{Just wondering why you use $\geq$ as opposed to $=$?}
%\AR{Ah, right-- fixed}
\[\rpoly(1,\dots,1)= (1-\gamma)\cdot \abs{\etree}(1,\dots,1).\]
Further, since each $p_i\ge p_0$ and $\poly(\vct{X})$ (and hence $\rpoly(\vct{X})$) has degree at most $k$, we have that
\[ \rpoly(1,\dots,1) \ge p_0^k\cdot \rpoly(1,\dots,1).\]
The above two inequalities implies $\rpoly(1,\dots,1) \ge p_0^k\cdot (1-\gamma)\cdot \abs{\etree}(1,\dots,1)$.
Further, since each $\prob_i\ge \prob_0$ and $\poly(\vct{X})$ (and hence $\rpoly(\vct{X})$) has degree at most $k$, we have that
\[ \rpoly(1,\dots,1) \ge \prob_0^k\cdot \rpoly(1,\dots,1).\]
The above two inequalities imply $\rpoly(1,\dots,1) \ge \prob_0^k\cdot (1-\gamma)\cdot \abs{\etree}(1,\dots,1)$.
%\AH{This looks really nice!}
Applying this bound in the runtime bound in~\Cref{lem:approx-alg} gives the first claimed runtime. The final runtime of $O_k\left(\frac 1{\eps^2}\cdot\treesize(\etree)\cdot \log{\frac{1}{\conf}}\right)$ follows by noting that $depth(\etree)\le \treesize(\etree)$ and absorbing all factors that just depend on $k$.
@ -559,7 +558,7 @@ Now prove that lemma ~\ref{lem:one-pass} holds for $k + 1$. Notice that $\etree
The runtime for \textsc{OnePass} is fairly straightforward. Note first that each node is visited at most once. Second, for each type of node visited, it can be trivially verified that there are only a constant number of operations. This concludes then with a $O\left(\treesize(\etree)\right)$ runtime.
\subsubsection{Proof of~\Cref{lem:sample}}
\subsection{Proof of~\Cref{lem:sample}}
First, we need to show that $\sampmon$ indeed returns a monomial $\monom$,\footnote{Technically it returns $\var(\monom)$ but for less cumbersome notation we will refer to $\var(\monom)$ simply by $\monom$ in this proof.} such that $(\monom, \coef)$ is in $\expandtree{\etree}$, which we do by induction on the depth of $\etree$.
For the base case, let the depth $d$ of $\etree$ be $0$. We have that the root node is either a constant $\coef$ for which by line ~\ref{alg:sample-num-return} we return $\{~\}$, or we have that $\etree.\type = \var$ and $\etree.\val = x$, and by line ~\ref{alg:sample-var-return} we return $\{x\}$. Both cases sample a monomial%satisfy ~\cref{def:monomial}
@ -585,8 +584,8 @@ Consider the case when the root is $\times$. Note that we are sampling a term f
For the case when $\etree.\val = +$, \sampmon ~will sample monomial $\monom$ from one of its children. By inductive hypothesis we know that any $\monom_\lchild$ in $\expandtree{\etree_\lchild}$ and any $\monom_\rchild$ in $\expandtree{\etree_\rchild}$ will both be sampled with correct probability $\frac{|\coef_{\monom_\lchild}|}{\abs{\etree_{\lchild}}(1,\ldots, 1)}$ and $\frac{|\coef_{\monom_\rchild}|}{\abs{\etree_\rchild}(1,\ldots, 1)}$, where either $\monom_\lchild$ or $\monom_\rchild$ will equal $\monom$, depending on whether $\etree_\lchild$ or $\etree_\rchild$ is sampled. Assume that $\monom$ is sampled from $\etree_\lchild$, and note that a symmetric argument holds for the case when $\monom$ is sampled from $\etree_\rchild$. Notice also that the probability of choosing $\etree_\lchild$ from $\etree$ is $\frac{\abs{\etree_\lchild}\polyinput{1}{1}}{\abs{\etree_\lchild}\polyinput{1}{1} + \abs{\etree_\rchild}\polyinput{1}{1}}$ as computed by $\onepass$. Then, since $\sampmon$ goes top-down, and each sampling choice is independent (which follows from the randomness in the root of $\etree$ being independent from the randomness used in its subtrees), the probability for $\monom$ to be sampled from $\etree$ is equal to the product of the probability that $\etree_\lchild$ is sampled from $\etree$ and $\monom$ is sampled in $\etree_\lchild$, and
\begin{align*}
&P(\sampmon(\etree) = \monom) = \\
&P(\sampmon(\etree_\lchild) = \monom) \cdot P(SampledChild(\etree) = \etree_\lchild)\\
&\probOf(\sampmon(\etree) = \monom) = \\
&\probOf(\sampmon(\etree_\lchild) = \monom) \cdot \probOf(SampledChild(\etree) = \etree_\lchild)\\
&= \frac{|\coef_\monom|}{|\etree_\lchild|(1,\ldots, 1)} \cdot \frac{\abs{\etree_\lchild}(1,\ldots, 1)}{|\etree_\lchild|(1,\ldots, 1) + |\etree_\rchild|(1,\ldots, 1)}\\
&= \frac{|\coef_\monom|}{\abs{\etree}(1,\ldots, 1)},
\end{align*}

View file

@ -59,7 +59,7 @@ Consider the following relations:
In the above,~\cref{eq:ls-2-1} follows by \cref{lem:tri}. Similarly ~\cref{eq:ls-2-2} follows by both \cref{lem:3m-G2} and \cref{lem:3p-G2}. Finally, ~\cref{eq:ls-2-3} follows by a simple rearrangement of terms.
Now, rearranging the terms in the identity of~\cref{lem:qE3-exp} and recalling $p\ne 0$ we deduce the following identities:
Now, rearranging the terms in the identity of~\cref{lem:qE3-exp} and recalling $\prob\ne 0$ we deduce the following identities:
\begin{align}
&\frac{\rpoly^3_{\graph{2}}(\prob,\ldots, \prob)}{6\prob^3} - \frac{\numocc{\graph{2}}{\ed}}{6\prob} - \numocc{\graph{2}}{\twopath} - \numocc{\graph{2}}{\twodis}\prob \nonumber\\
&- \numocc{\graph{2}}{\oneint}\prob - \big(\numocc{\graph{2}}{\twopathdis} + 3\numocc{\graph{2}}{\threedis}\big)\prob^2 \nonumber\\
@ -131,7 +131,7 @@ Using \cref{lem:3m-G3}, \cref{lem:3p-G3}, and \cref{lem:tri}, we derive % starti
&+ \numocc{\graph{1}}{\ed}\prob + 2 \cdot \numocc{\graph{1}}{\twopath}\prob. \label{eq:lem3-G3-1}
\end{align}
By the identity in~\cref{lem:qE3-exp} (along with the fact that $p\ne 0$), we get:
By the identity in~\cref{lem:qE3-exp} (along with the fact that $\prob\ne 0$), we get:
\begin{align}
&\frac{\rpoly_{\graph{3}}(\prob,\ldots, \prob)}{6\prob^3} - \frac{\numocc{\graph{3}}{\ed}}{6\prob} - \numocc{\graph{3}}{\twopath} - \numocc{\graph{3}}{\twodis}\prob \nonumber\\
& - \numocc{\graph{3}}{\oneint}\prob - \big(\numocc{\graph{3}}{\twopathdis} + 3\numocc{\graph{3}}{\threedis}\big)\prob^2\nonumber\\
@ -192,7 +192,7 @@ Note that if $\mtrix{\rpoly}$ has full rank then one can compute $x,y,z$ in $O(1
%Now we seek to show that all rows of the system are indeed independent.
%
%The method of minors can be used to compute the determinant,
To show that $\mtrix{\rpoly}$ indeed has full rank, we will show that $\dtrm{\mtrix{\rpoly}}\ne 0$ for every $p\in (0,1)$. Towards that end, we will show that $\dtrm{\mtrix{\rpoly}}$ as a polynomial in $p$ does not have any root in $(0,1)$.
To show that $\mtrix{\rpoly}$ indeed has full rank, we will show that $\dtrm{\mtrix{\rpoly}}\ne 0$ for every $\prob\in (0,1)$. Towards that end, we will show that $\dtrm{\mtrix{\rpoly}}$ as a polynomial in $\prob$ does not have any root in $(0,1)$.
We also make use of the fact that for a matrix with entries $ab, ac, ad,$ and $ae$, the determinant is $a^2be - a^2cd = a^2\cdot\begin{vmatrix} b&c \\d &e\end{vmatrix}$. We have $\dtrm{\mtrix{\rpoly}}$ is
\begin{align*}