Appendix (formerly C) D pass...almost complete.
parent
e350d9cecf
commit
ee6eb1ac2c
|
@ -18,6 +18,7 @@ such that
|
|||
\end{equation}
|
||||
\end{Theorem}
|
||||
\AR{\textbf{Aaron:} Just copied over from S4. The above text might need smoothing into the appendix.}
|
||||
The slight abuse of notation seen in $\abs{\circuit}\inparen{1,\ldots,1}$ is explained after \Cref{def:positive-circuit} and an example is given in \Cref{ex:def-pos-circ}. The only difference in the use of this notation in \Cref{lem:approx-alg} is that we include an additional exponent to square the quantity.
|
||||
|
||||
\subsection{Proof of Theorem \ref{lem:approx-alg}}\label{sec:proof-lem-approx-alg}
|
||||
\input{app_approx-alg-pseudo-code}
|
||||
|
@ -28,7 +29,7 @@ The $\onepass$ function completes in time:
|
|||
$$O\left(\size(\circuit) \cdot \multc{\log\left(\abs{\circuit}(1,\ldots, 1)\right)}{\log{\size(\circuit)}}\right)$$
|
||||
$\onepass$ guarantees two post-conditions: First, for each subcircuit $\vari{S}$ of $\circuit$, we have that $\vari{S}.\vari{partial}$ is set to $\abs{\vari{S}}(1,\ldots, 1)$. Second, when $\vari{S}.\type = \circplus$, \subcircuit.\lwght $= \frac{\abs{\subcircuit_\linput}(1,\ldots, 1)}{\abs{\subcircuit}(1,\ldots, 1)}$ and likewise for \subcircuit.\rwght.
|
||||
\end{Lemma}
|
||||
To prove correctness of \Cref{alg:mon-sam}, we only use the following fact that follows from the above lemma: for the modified circuit ($\circuit_{\vari{mod}}$), $\circuit_{\vari{mod}}.\vari{partial}=\abs{\circuit}(1,\dots,1)$.
|
||||
To prove correctness of \Cref{alg:mon-sam}, we only use the following fact that follows from the above lemma: for the modified circuit ($\circuit_{\vari{mod}}$) output by \onepass, $\circuit_{\vari{mod}}.\vari{partial}=\abs{\circuit}(1,\dots,1)$.
|
||||
|
||||
\begin{Lemma}\label{lem:sample}
|
||||
The function $\sampmon$ completes in time
|
||||
|
@ -38,7 +39,9 @@ $$O(\log{k} \cdot k \cdot \depth(\circuit)\cdot\multc{\log\left(\abs{\circuit}(1
|
|||
|
||||
With the above two lemmas, we are ready to argue the following result: % (proof in \Cref{sec:proofs-approx-alg}):
|
||||
\begin{Theorem}\label{lem:mon-samp}
|
||||
For any $\circuit$ with $\degree(poly(|\circuit|)) = k$, algorithm \ref{alg:mon-sam} outputs an estimate $\vari{acc}$ of $\rpoly(\prob_1,\ldots, \prob_\numvar)$ such that
|
||||
For any $\circuit$ with
|
||||
\AH{Pretty sure this is $\degree\inparen{\abs{\circuit}}$. Have to read on to be sure.}
|
||||
$\degree(poly(|\circuit|)) = k$, \Cref{alg:mon-sam} outputs an estimate $\vari{acc}$ of $\rpoly(\prob_1,\ldots, \prob_\numvar)$ such that
|
||||
\[\probOf\left(\left|\vari{acc} - \rpoly(\prob_1,\ldots, \prob_\numvar)\right|> \error \cdot \abs{\circuit}(1,\ldots, 1)\right) \leq \conf,\]
|
||||
in $O\left(\left(\size(\circuit)+\frac{\log{\frac{1}{\conf}}}{\error^2} \cdot k \cdot\log{k} \cdot \depth(\circuit)\right)\cdot \multc{\log\left(\abs{\circuit}(1,\ldots, 1)\right)}{\log{\size(\circuit)}}\right)$ time.
|
||||
\end{Theorem}
|
||||
|
@ -63,12 +66,12 @@ The claim on the runtime follows from \Cref{lem:mon-samp} since
|
|||
Let us now prove \Cref{lem:mon-samp}:
|
||||
\subsection{Proof of Theorem \ref{lem:mon-samp}}\label{app:subsec-th-mon-samp}
|
||||
\begin{proof}
|
||||
Consider now the random variables $\randvar_1,\dots,\randvar_\numsamp$, where each $\randvar_\vari{i}$ is the value of $\vari{Y}_{\vari{i}}$ in \cref{alg:mon-sam} after \cref{alg:mon-sam-product} is executed. In particular, note that we have
|
||||
\[\randvar_\vari{i}= \onesymbol\inparen{\monom\mod{\mathcal{B}}\not\equiv 0}\cdot \prod_{X_i\in \var\inparen{v}} p_i,\]
|
||||
Consider now the random variables $\randvar_1,\dots,\randvar_\numsamp$, where each $\randvar_\vari{i}$ is the value of $\vari{Y}_{\vari{i}}$ in \cref{alg:mon-sam} after \cref{alg:mon-sam-product} is executed. Overloading $\isInd{\cdot}$ to receive monomial input (recall $\encMon$ is the monomial composed of the variables in the set $\monom$), we have
|
||||
\[\randvar_\vari{i}= \indicator{\inparen{\isInd{\encMon}}}\cdot \prod_{X_i\in \var\inparen{v}} p_i,\]
|
||||
where the indicator variable handles the check in \Cref{alg:check-duplicate-block}.
|
||||
Then for random variable $\randvar_i$, it is the case that
|
||||
\begin{align*}
|
||||
\expct\pbox{\randvar_\vari{i}} &= \sum\limits_{(\monom, \coef) \in \expansion{{\circuit}} }\frac{\onesymbol\inparen{\encMon\mod{\mathcal{B}}\not\equiv 0}\cdot \coef\cdot\prod_{X_i\in \var\inparen{v}} p_i }{\abs{{\circuit}}(1,\dots,1)} \\
|
||||
\expct\pbox{\randvar_\vari{i}} &= \sum\limits_{(\monom, \coef) \in \expansion{{\circuit}} }\frac{\indicator{\inparen{\isInd{\encMon}}}\cdot \coef\cdot\prod_{X_i\in \var\inparen{v}} p_i }{\abs{{\circuit}}(1,\dots,1)} \\
|
||||
&= \frac{\rpoly(\prob_1,\ldots, \prob_\numvar)}{\abs{{\circuit}}(1,\ldots, 1)},
|
||||
\end{align*}
|
||||
where in the first equality we use the fact that $\vari{sgn}_{\vari{i}}\cdot \abs{\coef}=\coef$ and the second equality follows from \Cref{eq:tilde-Q-bi} with $X_i$ substituted by $\prob_i$.
|
||||
|
@ -96,7 +99,7 @@ For the claimed probability bound of $\probOf\left(\left|\vari{acc} - \rpoly(\pr
|
|||
This concludes the proof of the first claim of \Cref{lem:mon-samp}. Next, we prove the claim on the runtime.
|
||||
|
||||
\paragraph*{Run-time Analysis}
|
||||
The runtime of the algorithm is dominated first by \Cref{alg:mon-sam-onepass} (which by \Cref{lem:one-pass} takes time $O\left({\size(\circuit)}\cdot \multc{\log\left(\abs{\circuit}^2(1,\ldots, 1)\right)}{\log\left(\size(\circuit)\right)}\right)$) and then by $\samplesize$ iterations of the loop in \Cref{alg:sampling-loop}. Each iteration's run time is dominated by the call to \sampmon in \Cref{alg:mon-sam-sample} (which by \Cref{lem:sample} takes $O\left(\log{k} \cdot k \cdot {\depth(\circuit)}\cdot \multc{\log\left(\abs{\circuit}^2(1,\ldots, 1)\right)}{\log\left(\size(\circuit)\right)}\right)$
|
||||
The runtime of the algorithm is dominated first by \Cref{alg:mon-sam-onepass} (which by \Cref{lem:one-pass} takes time $O\left({\size(\circuit)}\cdot \multc{\log\left(\abs{\circuit}(1,\ldots, 1)\right)}{\log\left(\size(\circuit)\right)}\right)$) and then by $\samplesize$ iterations of the loop in \Cref{alg:sampling-loop}. Each iteration's run time is dominated by the call to \sampmon in \Cref{alg:mon-sam-sample} (which by \Cref{lem:sample} takes $O\left(\log{k} \cdot k \cdot {\depth(\circuit)}\cdot \multc{\log\left(\abs{\circuit}(1,\ldots, 1)\right)}{\log\left(\size(\circuit)\right)}\right)$
|
||||
) and the check in \Cref{alg:check-duplicate-block}, which by the subsequent argument takes $O(k\log{k})$ time. We sort the $O(k)$ variables by their block IDs and then check if there is a duplicate block ID or not. Combining all the times discussed here gives us the desired overall runtime.
|
||||
\qed
|
||||
\end{proof}
|
||||
|
@ -108,7 +111,7 @@ The result follows by first noting that by definition of $\gamma$, we have
|
|||
Further, since each $\prob_i\ge \prob_0$ and $\poly(\vct{X})$ (and hence $\rpoly(\vct{X})$) has degree at most $k$, we have that
|
||||
\[ \rpoly(\prob_1,\dots,\prob_\numvar) \ge \prob_0^k\cdot \rpoly(1,\dots,1).\]
|
||||
The above two inequalities imply $\rpoly(\prob_1,\dots,\prob_\numvar) \ge \prob_0^k\cdot (1-\gamma)\cdot \abs{{\circuit}}(1,\dots,1)$.
|
||||
Applying this bound in the runtime bound in \Cref{lem:approx-alg} gives the first claimed runtime. The final runtime of $O_k\left(\frac 1{\eps^2}\cdot\size(\circuit)\cdot \log{\frac{1}{\conf}}\cdot \multc{\log\left(\abs{\circuit}^2(1,\ldots, 1)\right)}{\log\left(\size(\circuit)\right)}\right)$ follows by noting that $\depth({\circuit})\le \size({\circuit})$ and absorbing all factors that just depend on $k$.
|
||||
Applying this bound in the runtime bound in \Cref{lem:approx-alg} gives the first claimed runtime. The final runtime of $O_k\left(\frac 1{\eps^2}\cdot\size(\circuit)\cdot \log{\frac{1}{\conf}}\cdot \multc{\log\left(\abs{\circuit}(1,\ldots, 1)\right)}{\log\left(\size(\circuit)\right)}\right)$ follows by noting that $\depth({\circuit})\le \size({\circuit})$ and absorbing all factors that just depend on $k$.
|
||||
\qed
|
||||
\end{proof}
|
||||
|
||||
|
@ -127,9 +130,9 @@ Let $\circuit$ be a tree (i.e. the sub-circuits corresponding to two children of
|
|||
\begin{proof}[Proof of \Cref{lem:C-ub-tree}]%[Proof of $\abs{\circuit}(1,\ldots, 1)$ is size $O(N)$]
|
||||
For notational simplicity define $N=\size(\circuit)$ and $k=\degree(\circuit)$.
|
||||
We use induction on $\depth(\circuit)$ to show that $\abs{\circuit}(1,\ldots, 1) \leq N^{k+1 }$.
|
||||
For the base case, we have that \depth(\circuit) $= 0$, and there can only be one node which must contain a coefficient (or constant) of $1$. In this case, $\abs{\circuit}(1,\ldots, 1) = 1$, and \size(\circuit) $= 1$, and it is true that $\abs{\circuit}(1,\ldots, 1) = 1 \leq N^{k+1} = 1^{1} = 1$.
|
||||
For the base case, we have that \depth(\circuit) $= 0$, and there can only be one node which must contain a coefficient or constant. In this case, $\abs{\circuit}(1,\ldots, 1) = 1$, and \size(\circuit) $= 1$, and by \Cref{def:degree} it is the case that $0 \leq k = \degree\inparen{\circuit} \leq 1$, and it is true that $\abs{\circuit}(1,\ldots, 1) = 1 \leq N^{k+1} = 1^{k + 1} = 1$ for $k \in \inset{0, 1}$.
|
||||
|
||||
Assume for $\ell > 0$ an arbitrary circuit \circuit of $\depth(\circuit) \leq \ell$ that it is true that $\abs{\circuit}(1,\ldots, 1) \leq N^{\deg(\circuit)+1 }$.% for $k \geq 1$ when \depth(C) $\geq 1$.
|
||||
Assume that for $\ell > 0$, an arbitrary circuit \circuit with $\depth(\circuit) \leq \ell$ satisfies $\abs{\circuit}(1,\ldots, 1) \leq N^{k+1}$.% for $k \geq 1$ when \depth(C) $\geq 1$.
|
||||
|
||||
For the inductive step we consider a circuit \circuit such that $\depth(\circuit) = \ell + 1$. The sink can only be either a $\circmult$ or $\circplus$ gate. Let $k_\linput, k_\rinput$ denote $\degree(\circuit_\linput)$ and $\degree(\circuit_\rinput)$ respectively. Consider the case when the sink gate is a $\circmult$ gate.
|
||||
Then note that
|
||||
|
@ -140,7 +143,7 @@ For the inductive step we consider a circuit \circuit such that $\depth(\circuit
|
|||
&\leq N^{k + 1}.\nonumber
|
||||
\end{align}
|
||||
%We derive the upperbound of \Cref{eq:sumcoeff-times-upper} by noting that the maximum value of the LHS occurs when both the base and exponent are maximized.
|
||||
In the above the first inequality follows from the inductive hypothesis (and the fact that the size of either subtree is at most $N-1$) and \Cref{eq:sumcoeff-times-upper} follows by noting by \cref{def:degree} for $k = \degree(\circuit)$ we have $k=k_\linput+k_\rinput+1$.
|
||||
In the above the first inequality follows from the inductive hypothesis (and the fact that the size of either subtree is at most $N-1$) and \Cref{eq:sumcoeff-times-upper} follows by \cref{def:degree} which states that for $k = \degree(\circuit)$ we have $k=k_\linput+k_\rinput+1$.
|
||||
|
||||
For the case when the sink gate is a $\circplus$ gate, then for $N_\linput = \size(\circuit_\linput)$ and $N_\rinput = \size(\circuit_\rinput)$ we have
|
||||
\begin{align}
|
||||
|
|
|
@ -10,20 +10,21 @@ The pure expansion of a polynomial $\poly$ is formed by computing all product of
|
|||
\end{Definition}
|
||||
Note that, similar in spirit to \Cref{def:reduced-bi-poly}, $\expansion{\circuit}$ (\Cref{def:expand-circuit}) reduces all variable exponents $e > 1$ to $e = 1$. Further, it is true that $\expansion{\circuit}$ is the pure expansion of $\circuit$.
|
||||
|
||||
In the following, recall by \cref{def:expand-circuit} that we use $\encMon$ to denote the monomial composed of the variables in $\monom$.
|
||||
%In the following, recall by \cref{def:expand-circuit} that we use $\encMon$ to denote the monomial composed of the variables in $\monom$.
|
||||
|
||||
\begin{Example}[Example for \Cref{def:expand-circuit}]\label{example:expr-tree-T}
|
||||
\begin{Example}[Example of Pure Expansion]\label{example:expr-tree-T}
|
||||
Consider the factorized representation $(X+ 2Y)(2X - Y)$ of the polynomial in \Cref{eq:poly-eg}.
|
||||
Its circuit $\circuit$ is illustrated in \Cref{fig:circuit}.
|
||||
The pure expansion of the product is $2X^2 - XY + 4XY - 2Y^2$ and the $\expansion{\circuit}$ is $[(X, 2), (XY, -1), (XY, 4), (Y, -2)]$.
|
||||
The pure expansion of the product is $2X^2 - XY + 4XY - 2Y^2$. As an additional example of \Cref{def:expand-circuit}, $\expansion{\circuit}=[(X, 2), (XY, -1), (XY, 4), (Y, -2)]$.
|
||||
\end{Example}
|
||||
$\expansion{\circuit}$ effectively\footnote{The minor difference here is that $\expansion{\circuit}$ encodes the \emph{reduced} form over the SOP pure expansion of the compressed representation, as opposed to the \abbrSMB representation} encodes the \emph{reduced} form of $\polyf\inparen{\circuit}$, decoupling each monomial into a set of variables $\monom$ and a real coefficient $\coef$.
|
||||
However, unlike the constraint on the input $\poly$ to compute $\rpoly$, the input circuit $\circuit$ does not need to be in \abbrSMB/SOP form.
|
||||
|
||||
\begin{Example}[Example for \Cref{def:positive-circuit}]\label{ex:def-pos-circ}
|
||||
Using the same factorization from \Cref{example:expr-tree-T}, $\polyf(\abs{\circuit}) = (X + 2Y)(2X + Y) = 2X^2 +XY +4XY + 2Y^2 = 2X^2 + 5XY + 2Y^2$. Note that this \textit{is not} the same as the polynomial from \Cref{eq:poly-eg}.
|
||||
Using the same factorization from \Cref{example:expr-tree-T}, $\polyf(\abs{\circuit}) = (X + 2Y)(2X + Y) = 2X^2 +XY +4XY + 2Y^2 = 2X^2 + 5XY + 2Y^2$. Note that this \textit{is not} the same as the polynomial from \Cref{eq:poly-eg}. As an example of the slight abuse of notation we alluded to, $\polyf\inparen{\abs{\circuit}}\inparen{1,\ldots, 1} =2\inparen{1}^2 + 5\inparen{1}\inparen{1} + 2\inparen{1}^2 = 9$.
|
||||
\end{Example}
|
||||
|
||||
\AH{Verify whether we need pure expansion or not.}
|
||||
%\begin{Definition}[Evaluation]\label{def:exp-poly-eval}
|
||||
%Given a circuit $\circuit$ and a valuation $\vct{a} \in \mathbb{R}^\numvar$, we define the evaluation of $\circuit$ on $\vct{a}$ as $\circuit(\vct{a}) = \polyf(\circuit)(\vct{a})$.
|
||||
%\end{Definition}
|
||||
|
|
|
@ -39,7 +39,7 @@ Please note that it is \textit{assumed} that the original call to \onepass consi
|
|||
|
||||
\subsection{$\onepass$ Example}
|
||||
\begin{Example}\label{example:one-pass}
|
||||
Let $\etree$ encode the expression $(X_1 + X_2)(X_1 - X_2) + X_2^2$. After one pass, \Cref{alg:one-pass-iter} would have computed the following weight distribution. For the two inputs of the root $+$ node $\etree$, $\etree.\lwght = \frac{4}{5}$ and $\etree.\rwght = \frac{1}{5}$. Similarly, let $\stree$ denote the left-subtree of $\etree_{\lchild}$, $\stree.\lwght = \stree.\rwght = \frac{1}{2}$. This is depicted in \Cref{fig:expr-tree-T-wght}.
|
||||
Let $\etree$ encode the expression $(X_1 + X_2)(X_1 - X_2) + X_2^2$. After one pass, \Cref{alg:one-pass-iter} would have computed the following weight distribution. For the two inputs of the sink gate $\circuit$, $\circuit.\lwght = \frac{4}{5}$ and $\circuit.\rwght = \frac{1}{5}$. Similarly, for $\stree$ denoting the left input of $\circuit_{\lchild}$, $\stree.\lwght = \stree.\rwght = \frac{1}{2}$. This is depicted in \Cref{fig:expr-tree-T-wght}.
|
||||
\end{Example}
|
||||
|
||||
\begin{figure}[h!]
|
||||
|
@ -117,10 +117,11 @@ Please note that it is \textit{assumed} that the original call to \onepass consi
|
|||
\node[tree_node] (b3) at (1.6, 2.5) {$\boldsymbol{\circplus}$};
|
||||
%Fourth level
|
||||
\node[tree_node] (a4) at (1.5, 3.75) {$\boldsymbol{\circmult}$};
|
||||
\node[tree_node] (b4) at (2.8, 3.75) {$\boldsymbol{\circplus}$};
|
||||
\node[tree_node] (b4) at (2.8, 4) {$\boldsymbol{\circplus}$};
|
||||
\node[above right=0.15cm of b4, inner sep=0pt, font=\bfseries](labelC) {$\circuit$};
|
||||
|
||||
\draw[->] (a1) edge[right] node{$\frac{1}{2}$} (a3);
|
||||
\draw[->] (b1) -- (c2);
|
||||
\draw[->] (b1) -- (b2);
|
||||
\draw[->] (a1) -- (b2);
|
||||
\draw[->] (a1) edge[bend left=15] (c2);
|
||||
\draw[->] (a1) edge[bend right=15] (c2);
|
||||
|
@ -131,6 +132,7 @@ Please note that it is \textit{assumed} that the original call to \onepass consi
|
|||
\draw[->] (a3) -- (a4);
|
||||
\draw[->] (b3) -- (a4);
|
||||
\draw[->] (a4) edge[above] node{$\frac{4}{5}$} (b4);
|
||||
\draw[black] (b4) -- (labelC);
|
||||
% \draw[->] (b3) -- (a4);
|
||||
% \draw[->] (a4) -- (2.25, 2.75);
|
||||
\end{tikzpicture}
|
||||
|
@ -174,21 +176,22 @@ Please note that it is \textit{assumed} that the original call to \onepass consi
|
|||
|
||||
\subsection{Proof of \onepass (\Cref{lem:one-pass})}\label{sec:proof-one-pass}
|
||||
\begin{proof}%[Proof of \Cref{lem:one-pass}]
|
||||
We prove the correct computation of \prt, \lwght, \rwght values on \circuit by induction over the number of iterations in the topological order \topord (line~\ref{alg:one-pass-loop}) of the input circuit \circuit. Note that \topord follows the standard definition of a topological ordering over the DAG structure of \circuit.
|
||||
We prove the correct computation of \prt, \lwght, \rwght values on \circuit by induction over the number of iterations in the topological order \topord (line~\ref{alg:one-pass-loop}) of the input circuit \circuit. \topord follows the standard definition of a topological ordering over the DAG structure of \circuit.
|
||||
|
||||
For the base case, we have only one gate, which by definition is a source gate and must be either \var or \tnum. In this case, as per \cref{eq:T-all-ones}, lines~\ref{alg:one-pass-var} and~\ref{alg:one-pass-num} correctly compute \circuit.\prt as $1$ and \circuit.\val respectively.
|
||||
For the base case, we have only one gate, which by definition is a source gate and must be either \var or \tnum. In this case, as per \cref{eq:T-all-ones}, lines~\ref{alg:one-pass-var} and~\ref{alg:one-pass-num} correctly compute \circuit.\prt as $1$.% and \circuit.\val respectively.
|
||||
|
||||
For the inductive hypothesis, assume that \onepass correctly computes \subcircuit.\prt, \subcircuit.\lwght, and \subcircuit.\rwght for all gates \gate in \circuit with $k \geq 0$ iterations over \topord.
|
||||
\AH{Notes above: Algo uses Reduce, but we don't use that anymore. The figure needs to change to a circuit.}
|
||||
We now prove for $k + 1$ iterations that \onepass correctly computes the \prt, \lwght, and \rwght values for each gate $\gate_\vari{i}$ in \circuit for $i \in [k + 1]$.
|
||||
Note that the $\gate_\vari{k + 1}$ must be in the last ordering of all gates $\gate_\vari{i}$. Note that for \size(\circuit) > 1, if $\gate_{k+1}$ is a leaf node, we are back to the base case. Otherwise $\gate_{k + 1}$ is an internal node $\gate_\vari{s}.\type = \circplus$ or $\gate_\vari{s}.\type = \circmult$, which both require binary input.
|
||||
The $\gate_\vari{k + 1}$ must be in the last ordering of all gates $\gate_\vari{i}$. When \size(\circuit) > 1, if $\gate_{k+1}$ is a leaf node, we are back to the base case. Otherwise $\gate_{k + 1}$ is an internal node %$\gate_\vari{s}.\type = \circplus$ or $\gate_\vari{s}.\type = \circmult$, which both require
|
||||
which requires binary input.
|
||||
|
||||
When $\gate_{k+1}.\type = \circplus$, then by line~\ref{alg:one-pass-plus} $\gate_{k+1}$.\prt $= \gate_{{k+1}_\lchild}$.\prt $+ \gate_{{k+1}_\rchild}$.\prt, a correct computation, as per \cref{eq:T-all-ones}. Further, lines~\ref{alg:one-pass-lwght} and~\ref{alg:one-pass-rwght} compute $\gate_{{k+1}}.\lwght = \frac{\gate_{{k+1}_\lchild}.\prt}{\gate_{{k+1}}.\prt}$ and analogously for $\gate_{{k+1}}.\rwght$. Note that all values needed for each computation have been correctly computed by the inductive hypothesis.
|
||||
When $\gate_{k+1}.\type = \circplus$, then by line~\ref{alg:one-pass-plus} $\gate_{k+1}$.\prt $= \gate_{{k+1}_\lchild}$.\prt $+ \gate_{{k+1}_\rchild}$.\prt, a correct computation, as per \cref{eq:T-all-ones}. Further, lines~\ref{alg:one-pass-lwght} and~\ref{alg:one-pass-rwght} compute $\gate_{{k+1}}.\lwght = \frac{\gate_{{k+1}_\lchild}.\prt}{\gate_{{k+1}}.\prt}$ and analogously for $\gate_{{k+1}}.\rwght$. All values needed for each computation have been correctly computed by the inductive hypothesis.
|
||||
|
||||
When $\gate_{k+1}.\type = \circmult$, then line~\ref{alg:one-pass-mult} computes $\gate_{k+1}.\prt = \gate_{{k+1}_\lchild}.\prt \circmult \gate_{{k+1}_\rchild}.\prt$, which indeed by \cref{eq:T-all-ones} is correct.
|
||||
When $\gate_{k+1}.\type = \circmult$, then line~\ref{alg:one-pass-mult} computes $\gate_{k+1}.\prt = \gate_{{k+1}_\lchild}.\prt \circmult \gate_{{k+1}_\rchild}.\prt$, which indeed by \cref{eq:T-all-ones} is correct. This concludes the proof of correctness.
|
||||
|
||||
\paragraph*{Runtime Analysis}
|
||||
It is known that $\topord(G)$ is computable in linear time. Next, each of the $\size(\circuit)$ iterations of the loop in \Cref{alg:one-pass-loop} takes $O\left( \multc{\log\left(\abs{\circuit}(1,\ldots, 1)\right)}{\log{\size(\circuit)}}\right)$ time. It is easy to see that each of the numbers which the algorithm computes is at most $\abs{\circuit}(1,\dots,1)$. Hence, by definition each such operation takes $\multc{\log\left(\abs{\circuit}(1,\ldots, 1)\right)}{\log{\size(\circuit)}}$ time, which proves the claimed runtime.
|
||||
It is known that $\topord(G)$ is computable in linear time. There are $\size(\circuit)$ iterations, each of which takes $O\left( \multc{\log\left(\abs{\circuit}(1,\ldots, 1)\right)}{\log\inparen{\size(\circuit)}}\right)$ time. This can be seen since each of the numbers which the algorithm computes is at most $\abs{\circuit}(1,\dots,1)$. Hence, by definition each such operation takes $\multc{\log\left(\abs{\circuit}(1,\ldots, 1)\right)}{\log{\size(\circuit)}}$ time, which proves the claimed runtime.
|
||||
\qed
|
||||
\end{proof}
|
||||
%In general it is known that an arithmetic computation which requires $M$ bits takes $O(\frac{\log{M}}{\log{N}})$ time for an input size $N$. Since each of the arithmetic operations at a given gate has a bit size of $O(\log{\abs{\circuit}(1,\ldots, 1)})$, thus, we obtain the general runtime of $O\left(\size(\circuit)\cdot \frac{\log{\abs{\circuit}(1,\ldots, 1)}}{\log{\size(\circuit)}}\right)$.
|
||||
|
|
Loading…
Reference in New Issue